| text (stringlengths 6–947k) | repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) |
|---|---|---|---|---|---|---|
from app import app, grabber, merge, segment
from flask import render_template, request, url_for, jsonify
import cv2
import numpy as np
import os, re
def rm(dir, pattern):
for f in os.listdir(dir):
if re.search(pattern, f):
os.remove(os.path.join(dir, f))
@app.route('/')
@app.route('/index')
def home():
return render_template('index.html')
@app.route('/grabber/', methods=['POST'])
def doGrabber():
# clean up folders
rm('app/static/img', 'dg*')
rm('app/ma_prediction_400','dg*')
data = request.form
lat = data['lat']
lon = data['lon']
zoom = data['zoom']
with open('app/static/secrets.txt') as f: token = f.read()
# get the location from digital globe
g = grabber.Grabber('app/static/img', token,'png')
time = g.grab(lat, lon, zoom)
# 'smart' means that the image went through the neural net prediction script
smart_contours = segment.predict(time,'app/ma_prediction_400/dg%s.png'%(time), 'app/static/img/nn_dg'+time+'.png')
smart_areas = segment.get_areas(smart_contours.values())
# 'dumb' means that the segmentation was run on the original image
dumb_contours = segment.dumb_contours('app/static/img/dg'+time+'.png','app/static/img/dumy_dg'+time+'.png')
dumb_areas = segment.get_areas(dumb_contours.values())
# uses 'smart' locations to pick out contours in the 'dumb' image
buildings = merge.intersect(smart_contours, dumb_contours)
merge.mkimage('app/static/img/dg'+time+'.png','app/static/img/merge_dg'+time+'.png', buildings)
areas = segment.get_areas(buildings.values())
url_nn = url_for('static', filename='img/nn_base_dg'+time+'.png')
url_smart = url_for('static', filename='img/nn_dg'+time+'.png')
url_dumb = url_for('static', filename='img/dumy_dg'+time+'.png')
url_merge = url_for('static', filename='img/merge_dg'+time+'.png')
# # for cameron
# dumb_contours = segment.dumb_contours('app/static/img/dg'+time+'.png','app/static/img/dumy_dg'+time+'.png')
# dumb_areas = segment.get_areas(dumb_contours.values())
# areas = dumb_areas
# url_nn = ''
# url_smart = ''
# url_merge = ''
# url_dumb = url_for('static', filename='img/dumy_dg'+time+'.png')
return jsonify(url_nn=url_nn, url_smart=url_smart, url_dumb=url_dumb, url_merge=url_merge,
areas=areas
)
| ncmatson/OSTE | app/views.py | Python | mit | 2,380 | 0.006723 |
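The `doGrabber` view above reads `lat`, `lon`, and `zoom` from a form-encoded POST, runs the grab/segment/merge pipeline, and returns JSON with four image URLs plus the computed areas. A minimal client sketch, assuming the app is served locally on Flask's default port (host, port, and coordinates are illustrative, not from the source):

```python
# Hedged sketch: POST coordinates to the /grabber/ route above and read back
# the keys produced by its jsonify() call. Host/port and values are assumed.
import requests

resp = requests.post(
    "http://127.0.0.1:5000/grabber/",
    data={"lat": "42.3601", "lon": "-71.0589", "zoom": "18"},
)
payload = resp.json()
print(payload["url_merge"])  # merged-contour overlay image
print(payload["areas"])      # contour areas from segment.get_areas()
```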
import re
import textwrap
__all__ = ['dumps', 'loads']
SPLIT_ITEMS = re.compile(r'\n(?!\s)').split
MATCH_ITEM = re.compile(r'''
(?P<key>\w+): # key
\s?
(?P<value>.*?)$ # first line
(?P<value2>.+)? # optional continuation line(s)
''', re.MULTILINE | re.DOTALL | re.VERBOSE).match
def dumps(data, comments={}):
s = ''
for k, v in data.items():
comment = comments.get(k, None)
if comment:
s += '# ' + '\n '.join(comment.splitlines()) + '\n'
value = v or ''
s += '{}: {}\n'.format(k, value.replace('\n', '\n '))
return s
def loads(serialized):
data = {}
lineno = 0
for item in SPLIT_ITEMS(serialized):
if not item.startswith('#') and item.strip():
m = MATCH_ITEM(item)
if not m:
raise ValueError('syntax error on line {}'.format(lineno + 1))
value = m.group('value')
value += textwrap.dedent(m.group('value2') or '')
data[m.group('key')] = value or None
lineno += item.count('\n') + 1
return data
| natano/python-git-orm | git_orm/serializer.py | Python | isc | 1,101 | 0 |
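A short round-trip sketch for the `dumps`/`loads` pair above: continuation lines of a multi-line value are indented by one space on dump, and lines starting with `#` are skipped on load. The sample dictionary is invented for illustration and assumes the module above is importable:

```python
data = {"title": "Fix crash", "body": "line one\nline two"}

serialized = dumps(data, comments={"title": "short summary"})
# serialized == "# short summary\ntitle: Fix crash\nbody: line one\n line two\n"

assert loads(serialized) == data  # multi-line value is re-assembled via textwrap.dedent
```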
class MutableValue:
"""
Used to avoid warnings (and in future errors) from aiohttp when the app context is modified.
"""
__slots__ = 'value',
def __init__(self, value=None):
self.value = value
def change(self, new_value):
self.value = new_value
def __len__(self):
return len(self.value)
def __repr__(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def __bool__(self):
return bool(self.value)
def __eq__(self, other):
return MutableValue(self.value == other)
def __add__(self, other):
return self.value + other
def __getattr__(self, item):
return getattr(self.value, item)
| samuelcolvin/aiohttp-devtools | aiohttp_devtools/runserver/utils.py | Python | mit | 732 | 0.001366 |
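A usage sketch for `MutableValue` above: the wrapper proxies `len()`, `str()`, truthiness, and attribute access to the wrapped value, so code that reads the value keeps working while `change()` swaps it in place. The URL strings are illustrative assumptions:

```python
static_url = MutableValue("http://localhost:8000/static")

print(str(static_url))   # the wrapped string
print(len(static_url))   # proxied via __len__
print(bool(static_url))  # True while a value is set

static_url.change("http://localhost:8001/static")  # mutate without reassigning
print(static_url.upper())  # attribute access proxied via __getattr__
```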
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-06 06:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('expense_date', models.DateField()),
('expense_detail', models.CharField(help_text='Enter expense details', max_length=200, null=True)),
('expense_amount', models.FloatField(help_text='Enter expense amount', null=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| PrasannaBarate/ExpenseTracker-Django | DailyExpenses/migrations/0001_initial.py | Python | apache-2.0 | 998 | 0.003006 |
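For reference, the `CreateModel` operation above corresponds roughly to the model below; the field options are read straight off the migration, while the module placement is an assumption:

```python
# Hedged reconstruction of the model behind the 0001_initial migration above.
from django.conf import settings
from django.db import models


class Expense(models.Model):
    expense_date = models.DateField()
    expense_detail = models.CharField(
        help_text='Enter expense details', max_length=200, null=True)
    expense_amount = models.FloatField(help_text='Enter expense amount', null=True)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, blank=True, null=True,
        on_delete=models.SET_NULL)
```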
import builtins
import imp
from importlib.test.import_ import test_relative_imports
from importlib.test.import_ import util as importlib_util
import marshal
import os
import py_compile
import random
import stat
import sys
import unittest
import textwrap
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload)
from test import script_helper
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyo",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
def tearDown(self):
unload(TESTFN)
setUp = tearDown
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
pyo = TESTFN + ".pyo"
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
unlink(pyo)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_execute_bit_not_copied(self):
# Issue 6070: under posix .pyc files got their execute bit set if
# the .py file had the execute bit set, but they aren't executable.
with temp_umask(0o022):
sys.path.insert(0, os.curdir)
try:
fname = TESTFN + os.extsep + "py"
open(fname, 'w').close()
os.chmod(fname, (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
__import__(TESTFN)
fn = imp.cache_from_source(fname)
if not os.path.exists(fn):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
s = os.stat(fn)
self.assertEqual(
stat.S_IMODE(s.st_mode),
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
def test_imp_module(self):
# Verify that the imp module can correctly load and find .py files
# XXX (ncoghlan): It would be nice to use support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
orig_path = os.path
orig_getenv = os.getenv
with EnvironmentVarGuard():
x = imp.find_module("os")
self.addCleanup(x[0].close)
new_os = imp.load_module("os", *x)
self.assertIs(os, new_os)
self.assertIs(orig_path, new_os.path)
self.assertIsNot(orig_getenv, new_os.getenv)
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc (or .pyo).
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertTrue(x is test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertTrue(y is test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, imp.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNot(mod, None, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertIn(ext, ('.pyc', '.pyo'))
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace.
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w.
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_import_initless_directory_warning(self):
with check_warnings(('', ImportWarning)):
# Just a random non-package directory we always expect to be
# somewhere in sys.path...
self.assertRaises(ImportError, __import__, "site-packages")
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
self.assertEqual("Import by filename is not supported.",
c.exception.args[0])
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import imp
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = imp.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(8)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = test_main.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
def _test_UNC_path(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
sys.path.append(path)
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
if sys.platform == "win32":
test_UNC_path = _test_UNC_path
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from . import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(SystemError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(SystemError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(ValueError, check_relative)
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147 related behaviors.
tag = imp.get_tag()
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
self.assertTrue(os.path.exists(os.path.join(
'__pycache__', '{}.{}.py{}'.format(
TESTFN, self.tag, __debug__ and 'c' or 'o'))))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
self.assertFalse(os.path.exists(os.path.join(
'__pycache__', '{}.{}.pyc'.format(TESTFN, self.tag))))
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = imp.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
self.assertRaises(ImportError, __import__, TESTFN)
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
m = __import__(TESTFN)
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = imp.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
unload('pep3147.foo')
unload('pep3147')
m = __import__('pep3147.foo')
init_pyc = imp.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = imp.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
unload('pep3147.foo')
unload('pep3147')
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
m = __import__('pep3147.foo')
init_pyc = imp.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = imp.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
class RelativeImportFromImportlibTests(test_relative_imports.RelativeImports):
def setUp(self):
self._importlib_util_flag = importlib_util.using___import__
importlib_util.using___import__ = True
def tearDown(self):
importlib_util.using___import__ = self._importlib_util_flag
def test_main(verbose=None):
run_unittest(ImportTests, PycacheTests,
PycRewritingTests, PathsTests, RelativeImportTests,
OverridingImportBuiltinTests,
RelativeImportFromImportlibTests)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
from test.test_import import test_main
test_main()
| invisiblek/python-for-android | python3-alpha/python3-src/Lib/test/test_import.py | Python | apache-2.0 | 24,643 | 0.000203 |
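The `PycacheTests` class in the file above exercises PEP 3147 behaviour through the old `imp` module (deprecated since Python 3.4 and removed in 3.12). Roughly, the calls it relies on look like this; the exact tag depends on the interpreter:

```python
# Illustrative only: mirrors the imp calls used by PycacheTests above.
import imp

print(imp.get_tag())                     # e.g. 'cpython-32' on the interpreter this test targeted
print(imp.cache_from_source('spam.py'))  # e.g. '__pycache__/spam.cpython-32.pyc'
```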
"""nox-poetry configuration file."""
from calcipy.dev.noxfile import build_check, build_dist, check_safety, coverage, tests # noqa: F401
| KyleKing/recipes | noxfile.py | Python | mit | 139 | 0 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import SHA256, Hash
def sha256(bytes):
digest = Hash(SHA256(), backend=default_backend())
digest.update(bytes)
return digest.finalize()
| klahnakoski/SpotManager | vendor/mo_math/hashes.py | Python | mpl-2.0 | 593 | 0 |
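A one-line usage sketch for the `sha256()` helper above (the input bytes are arbitrary; `bytes.hex()` assumes Python 3):

```python
digest = sha256(b"hello world")  # 32 raw bytes from cryptography's SHA256
print(digest.hex())              # hex-encoded digest
```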
from xml.etree import ElementTree as ET
def qn_tag(n, t):
return {
'ce': str(ET.QName('http://catchexception.org/xml-namespaces/ce', t)),
'sparkle': str(ET.QName('http://www.andymatuschak.org/xml-namespaces/sparkle', t))
}[n]
def create_channel(m):
if m['stable']:
return 'stable'
else:
return '{0}/{1}'.format(m['user'], m['branch'])
def create_link(rel_channel, filename):
return 'https://builds.catchexception.org/obs-studio/{0}/{1}'.format(rel_channel, filename)
def create_version(m):
if m['stable']:
return m['tag']['name']
else:
return '{0}.{1}'.format(m['tag']['name'], m['jenkins_build'])
def create_feed(rel_channel):
rss_el = ET.Element('rss')
title = 'OBS Studio {0} channel'.format(rel_channel)
link = create_link(rel_channel, "updates.xml")
description = 'OBS Studio update channel'
channel_el = ET.SubElement(rss_el, 'channel')
ET.SubElement(channel_el, 'title').text = title
ET.SubElement(channel_el, 'link').text = link
ET.SubElement(channel_el, 'description').text = description
ET.SubElement(channel_el, 'language').text = 'en'
return rss_el
def load_or_create_feed(rel_channel):
link = create_link(rel_channel, "updates.xml")
import urllib2
feed = create_feed(rel_channel)
try:
resp = urllib2.urlopen(link)
feed = ET.fromstring(resp.read())
except urllib2.HTTPError, e:
if e.code != 404:
raise
return feed
except:
raise
return feed
def load_or_create_history(rel_channel):
link = create_link(rel_channel, "history")
import urllib2, cPickle
try:
resp = urllib2.urlopen(link)
return cPickle.loads(resp.read())
except urllib2.HTTPError, e:
if e.code != 404:
raise
return dict()
def sign_package(package, key):
from shlex import split as shplit
from subprocess import PIPE
with open(package, 'r') as f:
import subprocess
p1 = subprocess.Popen(shplit('openssl dgst -sha1 -binary'), stdin=f, stdout=PIPE)
p2 = subprocess.Popen(shplit('openssl dgst -dss1 -sign "{0}"'.format(key)), stdin=p1.stdout, stdout=PIPE)
p3 = subprocess.Popen(shplit('openssl enc -base64'), stdin=p2.stdout, stdout=PIPE)
sig = ''.join(p3.communicate()[0].splitlines())
p1.poll(), p2.poll(), p3.poll()
if p1.returncode or p2.returncode or p3.returncode:
raise RuntimeError
return sig
def load_manifest(manifest_file):
with open(manifest_file, 'r') as f:
import cPickle
return cPickle.load(f)
def populate_item(item, package, key, m, channel, package_type):
from email.utils import formatdate
import os
package_path = '{0}-{1}.zip'.format(package, package_type)
signature = sign_package(package_path, key)
user_version = create_version(m)
base_url = 'https://builds.catchexception.org/obs-studio/{0}'.format(channel)
title = 'OBS Studio {0} on {1} ({2})'.format(user_version, channel, package_type)
ET.SubElement(item, 'title').text = title
ET.SubElement(item, qn_tag('sparkle', 'releaseNotesLink')).text = '{0}/notes.html'.format(base_url)
ET.SubElement(item, 'pubDate').text = formatdate()
ET.SubElement(item, qn_tag('ce', 'packageType')).text = package_type
if m['stable']:
ET.SubElement(item, qn_tag('ce', 'deployed')).text = 'false'
version = m['tag']['name']
else:
version = m['jenkins_build']
ET.SubElement(item, 'enclosure', {
'length': str(os.stat(package_path).st_size),
'type': 'application/octet-stream',
'url': '{0}/{1}-{2}.zip'.format(base_url, user_version, package_type),
qn_tag('ce', 'sha1'): m['sha1'],
qn_tag('sparkle', 'dsaSignature'): signature,
qn_tag('sparkle', 'shortVersionString'): user_version,
qn_tag('sparkle', 'version'): version
})
def mkdir(dirname):
import os, errno
try:
os.makedirs(dirname)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def write_tag_html(f, desc):
ul = False
for l in desc:
if not len(l):
continue
if l.startswith('*'):
ul = True
if not ul:
f.write('<ul>')
import re
f.write('<li>{0}</li>'.format(re.sub(r'^(\s*)?[*](\s*)?', '', l)))
else:
ul = False
if ul:
f.write('</ul>')
f.write('<p>{0}</p>'.format(l))
if ul:
f.write('</ul>')
def write_notes_html(f, manifest, versions, history):
# make newest to oldest
commits = [dict(sha1 = c[:40], desc = c[41:]) for c in manifest['commits']]
known_commits = set(c['sha1'] for c in commits)
commit_known = lambda commit: commit['sha1'] in known_commits
history[manifest['sha1']] = commits
from distutils.version import LooseVersion
last_tag = LooseVersion(manifest['tag']['name'])
versions = [v for v in versions if LooseVersion(v['user_version']) >= last_tag]
for v in versions:
v['commit_set'] = set(c['sha1'] for c in history.get(v['sha1'], []))
# oldest to newest
if versions:
v = versions[0]
v['commits'] = [dict(c) for c in history.get(v['sha1'], [])]
v['known'] = commit_known(v)
for c in v['commits']:
c['known'] = commit_known(c)
c['removed'] = False
for p, v in zip(versions, versions[1:]):
v['commits'] = list()
v['known'] = commit_known(v)
removed = p['commit_set'] - v['commit_set']
added = v['commit_set'] - p['commit_set']
for c in history.get(v['sha1'], []):
if c['sha1'] in added:
v['commits'].append(dict(c))
v['commits'][-1]['removed'] = False
for c in history.get(p['sha1'], [])[::-1]:
if c['sha1'] in removed:
v['commits'].append(dict(c))
v['commits'][-1]['removed'] = True
for c in v['commits']:
c['known'] = commit_known(c)
have_displayable_commits = False
for v in versions:
if v['commits']:
have_displayable_commits = True
break
f.write('''
<!DOCTYPE html>
<html>
<head>
<title>Release notes for version {0}</title>
<meta charset="utf-8">
<script>
var versions = ["{1}"];
function toggle(version)
{{
var changes = document.getElementById("changes" + version);
if (changes != null)
changes.style.display = changes.style.display == "none" ? "block" : "none";
var link = document.getElementById("toggle" + version);
if (link != null)
link.innerHTML = link.innerHTML == "[-]" ? "[+]" : "[-]";
return false;
}}
function toggle_lower(version)
{{
if (versions.indexOf(version) == -1)
return;
var version_found = false;
var captions = document.getElementsByTagName("h3");
for (var i = 0; i < captions.length; i++) {{
var parts = captions[i].id.split("caption");
if (!parts || parts.length != 2)
continue;
var rebased = captions[i].className.search(/rebased/) != -1;
var current_version = parts[1] == version;
if (version_found) {{
captions[i].className += " old";
toggle(parts[1]);
}}
if (current_version)
version_found = true;
}}
}}
</script>
<style>
html
{{
font-family: sans-serif;
}}
h3 a
{{
font-family: monospace;
}}
h3.old
{{
color: gray;
}}
.removed
{{
text-decoration: line-through;
}}
</style>
</head>
<body>
'''.format(manifest['tag']['name'], '", "'.join(str(v['internal_version']) for v in versions)))
if have_displayable_commits:
for v in versions[::-1]:
removed_class = ' class="removed"'
extra_style = removed_class if not v['known'] else ""
expand_link = ' <a id="toggle{0}" href="#caption{0}" onclick="return toggle(\'{0}\')">[-]</a>'.format(v['internal_version']) if v['commits'] else ""
caption = '<h3 id="caption{0}"{2}>Release notes for version {1}{3}</h3>'
caption = caption.format(v['internal_version'], v['user_version'], extra_style, expand_link)
f.write(caption)
if len(v['commits']):
url = 'https://github.com/{0}/obs-studio/commit/{1}'
change_fmt = '<li><a href="{0}"{2}>(view)</a> {1}</li>'
f.write('<ul id="changes{0}">'.format(v['internal_version']))
for c in v['commits']:
extra_style = removed_class if not c['known'] else ""
text = ("<span{0}>{1}</span>" if c['removed'] else "{1}").format(removed_class, c['desc'])
url_formatted = url.format(manifest['user'], c['sha1'])
f.write(change_fmt.format(url_formatted, text, extra_style))
f.write('</ul>')
f.write('<h2>Release notes for version {0}</h2>'.format(manifest['tag']['name']))
write_tag_html(f, manifest['tag']['description'])
f.write('''
<script>
parts = window.location.href.toString().split("#");
if (parts.length == 2 && parts[1].search(/^\d+$/) == 0)
toggle_lower(parts[1]);
</script>
</body>
</html>
''')
def dump_xml(file, element):
with open(file, 'w') as f:
f.write('<?xml version="1.0" encoding="UTF-8" standalone="no"?>')
ET.ElementTree(element).write(f, encoding='utf-8', method='xml')
def create_update(package, key, manifest_file):
manifest = load_manifest(manifest_file)
channel = create_channel(manifest)
feed_ele = load_or_create_feed(channel)
history = load_or_create_history(channel)
from distutils.version import LooseVersion
if manifest['stable']:
my_version = LooseVersion(manifest['tag']['name'])
else:
my_version = LooseVersion(manifest['jenkins_build'])
versions = []
seen_versions = set()
for item in feed_ele.findall('channel/item'):
en_ele = item.find('enclosure')
internal_version = LooseVersion(en_ele.get(qn_tag('sparkle', 'version')))
user_version = en_ele.get(qn_tag('sparkle', 'shortVersionString'))
sha1 = en_ele.get(qn_tag('ce', 'sha1'))
if internal_version == my_version:
# shouldn't happen, delete
feed_ele.find('channel').remove(item)
continue
if str(internal_version) in seen_versions:
continue
seen_versions.add(str(internal_version))
versions.append({
'internal_version': internal_version,
'user_version': user_version,
'sha1': sha1
})
versions.append(dict(
internal_version = my_version,
user_version = create_version(manifest),
sha1 = manifest['sha1']
))
import StringIO
notes = StringIO.StringIO()
write_notes_html(notes, manifest, versions, history)
new_item = ET.SubElement(feed_ele.find('channel'), 'item')
populate_item(new_item, package, key, manifest, channel, 'mpkg')
new_item = ET.SubElement(feed_ele.find('channel'), 'item')
populate_item(new_item, package, key, manifest, channel, 'app')
from os import path
deploy_path = path.join('deploy', channel)
mkdir(deploy_path)
feed_ele = ET.fromstring(ET.tostring(feed_ele, encoding='utf-8', method='xml'))
dump_xml(path.join(deploy_path, 'updates.xml'), feed_ele)
with open(path.join(deploy_path, 'notes.html'), 'w') as f:
f.write(notes.getvalue())
with open(path.join(deploy_path, 'history'), 'w') as f:
import cPickle
cPickle.dump(history, f)
import shutil
shutil.copy('{0}-mpkg.zip'.format(package), path.join(deploy_path, '{0}-mpkg.zip'.format(create_version(manifest))))
shutil.copy('{0}-app.zip'.format(package), path.join(deploy_path, '{0}-app.zip'.format(create_version(manifest))))
if __name__ == "__main__":
ET.register_namespace('sparkle', 'http://www.andymatuschak.org/xml-namespaces/sparkle')
ET.register_namespace('ce', 'http://catchexception.org/xml-namespaces/ce')
import argparse
parser = argparse.ArgumentParser(description='obs-studio release util')
parser.add_argument('-m', '--manifest', dest='manifest', default='manifest')
parser.add_argument('-p', '--package', dest='package', default='OBS')
parser.add_argument('-k', '--key', dest='key')
args = parser.parse_args()
create_update(args.package, args.key, args.manifest)
| alesaccoia/chew-broadcaster | install-utils/release/osx/release_util.py | Python | gpl-2.0 | 13,845 | 0.004478 |
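The release script above drives everything off a pickled manifest. The keys it reads are sketched below with invented placeholder values; the `create_channel`/`create_version` outputs follow directly from the functions defined in the script:

```python
# Placeholder manifest illustrating the keys release_util.py above expects.
manifest = {
    'stable': False,
    'user': 'someuser',
    'branch': 'somebranch',
    'tag': {'name': '0.6.2', 'description': ['* fix a crash']},
    'jenkins_build': '123',
    'sha1': 'a' * 40,
    'commits': [],
}

print(create_channel(manifest))  # -> 'someuser/somebranch'
print(create_version(manifest))  # -> '0.6.2.123'
```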
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.tests.unit import test_cli20
class CLITestV20VpnIpsecPolicyJSON(test_cli20.CLITestV20Base):
def test_create_ipsecpolicy_all_params(self):
"""vpn-ipsecpolicy-create all params with dashes."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'first-ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'ah'
tenant_id = 'my-tenant'
my_id = 'my-id'
lifetime = 'units=seconds,value=20000'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--transform-protocol', transform_protocol,
'--encapsulation-mode', encapsulation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode', 'description',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode, description,
transform_protocol, pfs,
tenant_id]
extra_body = {
'lifetime': {
'units': 'seconds',
'value': 20000,
},
}
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
extra_body=extra_body)
def test_create_ipsecpolicy_with_limited_params(self):
"""vpn-ipsecpolicy-create with limited params."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-128'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'esp'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--tenant-id', tenant_id]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode,
transform_protocol, pfs,
tenant_id]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
def _test_lifetime_values(self, lifetime):
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'my-ipsec-policy'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
ike_version = 'v1'
phase1_negotiation_mode = 'main'
pfs = 'group5'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--ike-version', ike_version,
'--phase1-negotiation-mode', phase1_negotiation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'description',
'auth_algorithm', 'encryption_algorithm',
'phase1_negotiation_mode',
'ike_version', 'pfs',
'tenant_id']
position_values = [name, description,
auth_algorithm, encryption_algorithm,
phase1_negotiation_mode, ike_version, pfs,
tenant_id]
try:
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
except Exception:
return
self.fail("IPsecPolicy Lifetime Error")
def test_create_ipsecpolicy_with_invalid_lifetime_keys(self):
lifetime = 'uts=seconds,val=20000'
self._test_lifetime_values(lifetime)
def test_create_ipsecpolicy_with_invalide_lifetime_values(self):
lifetime = 'units=minutes,value=0'
self._test_lifetime_values(lifetime)
def test_list_ipsecpolicy(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ipsecpolicy_pagination(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ipsecpolicy_sort(self):
"""vpn-ipsecpolicy-list --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ipsecpolicy_limit(self):
"""vpn-ipsecpolicy-list -P."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_ipsecpolicy_id(self):
"""vpn-ipsecpolicy-show ipsecpolicy_id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_ipsecpolicy_id_name(self):
"""vpn-ipsecpolicy-show."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_ipsecpolicy(self):
"""vpn-ipsecpolicy-update myid --name newname --tags a b."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.UpdateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname'],
{'name': 'newname', })
def test_delete_ipsecpolicy(self):
"""vpn-ipsecpolicy-delete my-id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.DeleteIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
class CLITestV20VpnIpsecPolicyXML(CLITestV20VpnIpsecPolicyJSON):
format = 'xml'
| sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/neutronclient/tests/unit/vpn/test_cli20_ipsecpolicy.py | Python | mit | 8,365 | 0 |
# -*- coding: utf-8 -*-
"""Reusable mixins for SQLAlchemy declarative models."""
from __future__ import unicode_literals
import datetime
import sqlalchemy as sa
class Timestamps(object):
created = sa.Column(
sa.DateTime,
default=datetime.datetime.utcnow,
server_default=sa.func.now(),
nullable=False,
)
updated = sa.Column(
sa.DateTime,
server_default=sa.func.now(),
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
| tgbugs/hypush | hyputils/memex/db/mixins.py | Python | mit | 548 | 0 |
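A sketch of attaching the `Timestamps` mixin above to a declarative model; the base class and table definition are assumptions for illustration:

```python
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Note(Base, Timestamps):
    __tablename__ = 'note'
    id = sa.Column(sa.Integer, primary_key=True)
    body = sa.Column(sa.UnicodeText)

# 'created' is filled on insert; 'updated' is refreshed on every UPDATE via onupdate.
```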
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify output when a Progress() call is initialized with the list
that represents a canonical "spinner" on the output.
"""
import os
import TestSCons
test = TestSCons.TestSCons(universal_newlines=None)
test.write('SConstruct', r"""
env = Environment()
env['BUILDERS']['C'] = Builder(action=Copy('$TARGET', '$SOURCE'))
Progress(['-\r', '\\\r', '|\r', '/\r'])
env.C('S1.out', 'S1.in')
env.C('S2.out', 'S2.in')
env.C('S3.out', 'S3.in')
env.C('S4.out', 'S4.in')
""")
test.write('S1.in', "S1.in\n")
test.write('S2.in', "S2.in\n")
test.write('S3.in', "S3.in\n")
test.write('S4.in', "S4.in\n")
expect = """\
\\\r|\rCopy("S1.out", "S1.in")
/\r-\rCopy("S2.out", "S2.in")
\\\r|\rCopy("S3.out", "S3.in")
/\r-\rCopy("S4.out", "S4.in")
\\\r|\r"""
if os.linesep != '\n':
expect = expect.replace('\n', os.linesep)
test.run(arguments = '-Q .', stdout=expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| andrewyoung1991/scons | test/Progress/spinner.py | Python | mit | 2,151 | 0.00093 |
#!/usr/bin/env python
#
# This is run by Travis-CI before an upgrade to load some data into the
# database. After the upgrade is complete, the data is verified by
# upgrade-after.py to make sure that the upgrade of the database went smoothly.
#
import logging
import unittest
import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../pynipap')
sys.path.insert(0, '../nipap')
sys.path.insert(0, '../nipap-cli')
from nipap.backend import Nipap
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
from pynipap import AuthOptions, VRF, Pool, Prefix, NipapNonExistentError, NipapDuplicateError, NipapValueError
import pynipap
pynipap.xmlrpc_uri = 'http://unittest:gottatest@127.0.0.1:1337'
o = AuthOptions({
'authoritative_source': 'nipap'
})
class TestHelper:
@classmethod
def clear_database(cls):
cfg = NipapConfig('/etc/nipap/nipap.conf')
n = Nipap()
# have to delete hosts before we can delete the rest
n._execute("DELETE FROM ip_net_plan WHERE masklen(prefix) = 32")
# the rest
n._execute("DELETE FROM ip_net_plan")
# delete all except for the default VRF with id 0
n._execute("DELETE FROM ip_net_vrf WHERE id > 0")
# set default info for VRF 0
n._execute("UPDATE ip_net_vrf SET name = 'default', description = 'The default VRF, typically the Internet.' WHERE id = 0")
n._execute("DELETE FROM ip_net_pool")
n._execute("DELETE FROM ip_net_asn")
def add_prefix(self, prefix, type, description, tags=None):
if tags is None:
tags = []
p = Prefix()
p.prefix = prefix
p.type = type
p.description = description
p.tags = tags
p.save()
return p
class TestLoad(unittest.TestCase):
""" Load some data into the database
"""
def test_load_data(self):
"""
"""
th = TestHelper()
p1 = th.add_prefix('192.168.0.0/16', 'reservation', 'test')
p2 = th.add_prefix('192.168.0.0/20', 'reservation', 'test')
p3 = th.add_prefix('192.168.0.0/24', 'reservation', 'test')
p4 = th.add_prefix('192.168.1.0/24', 'reservation', 'test')
p5 = th.add_prefix('192.168.2.0/24', 'reservation', 'test')
p6 = th.add_prefix('192.168.32.0/20', 'reservation', 'test')
p7 = th.add_prefix('192.168.32.0/24', 'reservation', 'test')
p8 = th.add_prefix('192.168.32.1/32', 'reservation', 'test')
ps1 = th.add_prefix('2001:db8:1::/48', 'reservation', 'test')
ps2 = th.add_prefix('2001:db8:1::/64', 'reservation', 'test')
ps3 = th.add_prefix('2001:db8:2::/48', 'reservation', 'test')
pool1 = Pool()
pool1.name = 'upgrade-test'
pool1.ipv4_default_prefix_length = 31
pool1.ipv6_default_prefix_length = 112
pool1.save()
p2.pool = pool1
p2.save()
ps1.pool = pool1
ps1.save()
pool2 = Pool()
pool2.name = 'upgrade-test2'
pool2.save()
vrf1 = VRF()
vrf1.name = 'foo'
vrf1.rt = '123:123'
vrf1.save()
if __name__ == '__main__':
# set up logging
log = logging.getLogger()
logging.basicConfig()
log.setLevel(logging.INFO)
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()
| ettrig/NIPAP | tests/upgrade-before.py | Python | mit | 3,381 | 0.003253 |
"""tornado_elasticsearch extends the official elasticsearch library adding
asynchronous support for the Tornado stack.
See http://elasticsearch-py.readthedocs.org/en/latest/ for information
on how to use the API beyond the introduction for how to use with Tornado::
from tornado import gen
from tornado import web
from tornado_elasticsearch import AsyncElasticsearch
class Info(web.RequestHandler):
@web.asynchronous
@gen.engine
def get(self, *args, **kwargs):
es = AsyncElasticsearch()
info = yield es.info()
self.finish(info)
"""
from elasticsearch.connection.base import Connection
from elasticsearch import exceptions
from elasticsearch.client import Elasticsearch
from elasticsearch.transport import Transport, TransportError
from elasticsearch.client.utils import query_params, _make_path
from tornado import concurrent
from tornado import gen
from tornado import httpclient
import logging
import time
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from tornado import version
__version__ = '0.5.0'
LOGGER = logging.getLogger(__name__)
class AsyncHttpConnection(Connection):
"""Add Tornado Asynchronous support to ElasticSearch.
:param str host: The host for the connection
:param int port: The port for the connection
:param str|tuple http_auth: optional http auth information as either a
colon delimited string ``("username:password")`` or
tuple ``(username, password)``
:param int request_timeout: optional default timeout in seconds
:arg use_ssl: use ssl for the connection if ``True``
"""
_auth_user = None
_auth_password = None
_user_agent = 'tornado_elasticsearch %s/Tornado %s' % (__version__, version)
ssl_transport_schema = 'https'
def __init__(self, host='localhost', port=9200, http_auth=None,
use_ssl=False, request_timeout=None, max_clients=10, **kwargs):
super(AsyncHttpConnection, self).__init__(host=host, port=port,
**kwargs)
self._assign_auth_values(http_auth)
self.base_url = '%s://%s:%s%s' % (self.ssl_transport_schema if use_ssl
else self.transport_schema,
host, port, self.url_prefix)
httpclient.AsyncHTTPClient.configure(None, max_clients=max_clients)
self._client = httpclient.AsyncHTTPClient()
self._headers = {'Content-Type': 'application/json; charset=UTF-8'}
self._start_time = None
self.request_timeout = request_timeout
@concurrent.return_future
def perform_request(self, method, url, params=None, body=None,
timeout=None, ignore=(), callback=None):
request_uri = self._request_uri(url, params)
LOGGER.debug('%s, %r, %r', url, body, params)
kwargs = self._request_kwargs(method, body, timeout)
self._start_time = time.time()
def on_response(response):
duration = time.time() - self._start_time
raw_data = response.body.decode('utf-8') \
if response.body is not None else None
LOGGER.info('Response from %s: %s', url, response.code)
if not (200 <= response.code < 300) and \
response.code not in ignore:
LOGGER.debug('Error: %r', raw_data)
self.log_request_fail(method, request_uri, url, body, duration,
response.code)
error = exceptions.HTTP_EXCEPTIONS.get(response.code,
TransportError)
raise error(response.code, raw_data)
self.log_request_success(method, request_uri, url, body,
response.code, raw_data, duration)
callback((response.code, response.headers, raw_data))
LOGGER.debug('Fetching [%s] %s', kwargs['method'], request_uri)
LOGGER.debug('kwargs: %r', kwargs)
self._client.fetch(httpclient.HTTPRequest(request_uri, **kwargs),
callback=on_response)
def _assign_auth_values(self, http_auth):
"""Take the http_auth value and split it into the attributes that
carry the http auth username and password
:param str|tuple http_auth: The http auth value
"""
if not http_auth:
pass
elif isinstance(http_auth, (tuple, list)):
self._auth_user, self._auth_password = http_auth
elif isinstance(http_auth, str):
self._auth_user, self._auth_password = http_auth.split(':')
else:
raise ValueError('HTTP Auth Credentials should be str or '
'tuple, not %s' % type(http_auth))
def _request_kwargs(self, method, body, timeout):
if body and method == 'GET':
method = 'POST'
kwargs = {'method': method, 'user_agent': self._user_agent,
'headers': self._headers}
if self.request_timeout is not None:
kwargs['request_timeout'] = self.request_timeout
if self._auth_user and self._auth_password:
kwargs['auth_username'] = self._auth_user
kwargs['auth_password'] = self._auth_password
if body:
kwargs['body'] = body
if timeout:
kwargs['request_timeout'] = timeout
kwargs['allow_nonstandard_methods'] = True
return kwargs
def _request_uri(self, url, params):
uri = self.url_prefix + url
if params:
uri = '%s?%s' % (uri, urlencode(params or {}))
return '%s%s' % (self.base_url, uri)
class AsyncTransport(Transport):
@gen.coroutine
def perform_request(self, method, url, params=None, body=None):
"""Perform the actual request. Retrieve a connection from the
connection pool, pass all the information to its perform_request
method and return the data.
If an exception was raised, mark the connection as failed and retry (up
to `max_retries` times).
If the operation was successful and the connection used was previously
marked as dead, mark it as live, resetting its failure count.
:param method: HTTP method to use
:param url: absolute url (without host) to target
:param params: dictionary of query parameters, will be handed over to
the underlying :class:`~tornado_elasticsearch.AsyncHttpConnection`
class for serialization
:param body: body of the request, will be serialized using serializer
and passed to the connection
"""
if body is not None:
body = self.serializer.dumps(body)
# some clients or environments don't support sending GET with body
if method in ('HEAD', 'GET') and self.send_get_body_as != 'GET':
# send it as post instead
if self.send_get_body_as == 'POST':
method = 'POST'
# or as source parameter
elif self.send_get_body_as == 'source':
if params is None:
params = {}
params['source'] = body
body = None
if body is not None:
try:
body = body.encode('utf-8')
except (UnicodeDecodeError, AttributeError):
# bytes/str - no need to re-encode
pass
ignore = ()
if params and 'ignore' in params:
ignore = params.pop('ignore')
if isinstance(ignore, int):
ignore = (ignore, )
for attempt in range(self.max_retries + 1):
connection = self.get_connection()
try:
result = yield connection.perform_request(method, url,
params, body,
ignore=ignore)
(status, headers, data) = result
except TransportError as e:
retry = False
if isinstance(e, exceptions.ConnectionTimeout):
retry = self.retry_on_timeout
elif isinstance(e, exceptions.ConnectionError):
retry = True
elif e.status_code in self.retry_on_status:
retry = True
if retry:
# only mark as dead if we are retrying
self.mark_dead(connection)
# raise exception on last retry
if attempt == self.max_retries:
raise
else:
raise
else:
# connection didn't fail, confirm it's live status
self.connection_pool.mark_live(connection)
response = self.deserializer.loads(data,
headers.get('content-type')
) if data else None
raise gen.Return((status, response))
class AsyncElasticsearch(Elasticsearch):
"""Extends the official elasticsearch.Elasticsearch object to make the
client invoked methods coroutines.
"""
def __init__(self, hosts=None, **kwargs):
"""Create a new AsyncElasticsearch instance
"""
kwargs['connection_class'] = AsyncHttpConnection
kwargs['transport_class'] = AsyncTransport
super(AsyncElasticsearch, self).__init__(hosts, **kwargs)
@gen.coroutine
@query_params()
def ping(self, params=None):
""" Returns True if the cluster is up, False otherwise. """
try:
self.transport.perform_request('HEAD', '/', params=params)
except TransportError:
raise gen.Return(False)
raise gen.Return(True)
@gen.coroutine
@query_params()
def info(self, params=None):
"""Get the basic info from the current cluster.
:rtype: dict
"""
_, data = yield self.transport.perform_request('GET', '/',
params=params)
raise gen.Return(data)
@gen.coroutine
def health(self, params=None):
"""Coroutine. Queries cluster Health API.
Returns a 2-tuple, where first element is request status, and second
element is a dictionary with response data.
:param params: dictionary of query parameters, will be handed over to
the underlying :class:`~tornado_elasticsearch.AsyncHttpConnection`
class for serialization
"""
status, data = yield self.transport.perform_request(
"GET", "/_cluster/health", params=params)
raise gen.Return((status, data))
@gen.coroutine
@query_params('consistency', 'id', 'parent', 'percolate', 'refresh',
'replication', 'routing', 'timeout', 'timestamp', 'ttl',
'version', 'version_type')
def create(self, index, doc_type, body, id=None, params=None):
"""
Adds a typed JSON document in a specific index, making it searchable.
Behind the scenes this method calls index(..., op_type='create')
`<http://elasticsearch.org/guide/reference/api/index_/>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: Document ID
:arg body: The document
:arg consistency: Explicit write consistency setting for the operation
:arg id: Specific document ID (when the POST method is used)
:arg parent: ID of the parent document
:arg percolate: Percolator queries to execute while indexing the doc
:arg refresh: Refresh the index after performing the operation
:arg replication: Specific replication type (default: sync)
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type
"""
result = yield self.index(index, doc_type, body, id=id, params=params,
op_type='create')
raise gen.Return(result)
@gen.coroutine
@query_params('consistency', 'op_type', 'parent', 'percolate', 'refresh',
'replication', 'routing', 'timeout', 'timestamp', 'ttl',
'version', 'version_type')
def index(self, index, doc_type, body, id=None, params=None):
"""
Adds or updates a typed JSON document in a specific index, making it
searchable. `<http://elasticsearch.org/guide/reference/api/index_/>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg body: The document
:arg id: Document ID
:arg consistency: Explicit write consistency setting for the operation
:arg op_type: Explicit operation type (default: index)
:arg parent: ID of the parent document
:arg percolate: Percolator queries to execute while indexing the doc
:arg refresh: Refresh the index after performing the operation
:arg replication: Specific replication type (default: sync)
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type
"""
_, data = yield self.transport.perform_request(
'PUT' if id else 'POST', _make_path(index, doc_type, id),
params=params, body=body)
raise gen.Return(data)
@gen.coroutine
@query_params('parent', 'preference', 'realtime', 'refresh', 'routing')
def exists(self, index, id, doc_type='_all', params=None):
"""
Returns a boolean indicating whether or not given document exists in
Elasticsearch. `<http://elasticsearch.org/guide/reference/api/get/>`_
:arg index: The name of the index
:arg id: The document ID
:arg doc_type: The type of the document (uses `_all` by default to
fetch the first document matching the ID across all types)
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
"""
try:
            yield self.transport.perform_request(
                'HEAD', _make_path(index, doc_type, id), params=params)
except exceptions.NotFoundError:
            raise gen.Return(False)
raise gen.Return(True)
@gen.coroutine
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'parent', 'preference', 'realtime', 'refresh', 'routing')
def get(self, index, id, doc_type='_all', params=None):
"""
Get a typed JSON document from the index based on its id.
`<http://elasticsearch.org/guide/reference/api/get/>`_
:arg index: The name of the index
:arg id: The document ID
:arg doc_type: The type of the document (uses `_all` by default to
fetch the first document matching the ID across all types)
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg fields: A comma-separated list of fields to return in the response
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
"""
_, data = yield self.transport.perform_request(
'GET', _make_path(index, doc_type, id), params=params)
raise gen.Return(data)
@gen.coroutine
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'local')
def get_alias(self, index=None, name=None, params=None):
"""
Retrieve a specified alias.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_
:arg index: A comma-separated list of index names to filter aliases
:arg name: A comma-separated list of alias names to return
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both., default 'all',
valid choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state from
master node (default: false)
"""
_, result = yield self.transport.perform_request(
'GET', _make_path(index, '_alias', name), params=params)
raise gen.Return(result)
@gen.coroutine
@query_params('_source_exclude', '_source_include', 'parent', 'preference',
'realtime', 'refresh', 'routing')
def get_source(self, index, id, doc_type='_all', params=None):
"""
        Get the source of a document by its index, type and id.
`<http://elasticsearch.org/guide/reference/api/get/>`_
:arg index: The name of the index
:arg doc_type: The type of the document (uses `_all` by default to
fetch the first document matching the ID across all types)
:arg id: The document ID
        :arg _source_exclude: A list of fields to exclude from the returned
            _source field
        :arg _source_include: A list of fields to extract and return from the
            _source field
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
"""
_, data = yield self.transport.perform_request(
'GET', _make_path(index, doc_type, id, '_source'), params=params)
raise gen.Return(data)
@gen.coroutine
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'parent', 'preference', 'realtime', 'refresh', 'routing')
def mget(self, body, index=None, doc_type=None, params=None):
"""
Get multiple documents based on an index, type (optional) and ids.
`<http://elasticsearch.org/guide/reference/api/multi-get/>`_
:arg body: Document identifiers; can be either `docs` (containing full
            document information) or `ids` (when index and type are provided
            in the URL).
:arg index: The name of the index
:arg doc_type: The type of the document
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg fields: A comma-separated list of fields to return in the response
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
"""
_, data = yield self.transport.perform_request(
'GET', _make_path(index, doc_type, '_mget'),
params=params, body=body)
raise gen.Return(data)
@gen.coroutine
@query_params('consistency', 'fields', 'lang', 'parent', 'percolate',
'refresh', 'replication', 'retry_on_conflict', 'routing',
'script', 'timeout', 'timestamp', 'ttl', 'version',
'version_type')
def update(self, index, doc_type, id, body=None, params=None):
"""
Update a document based on a script or partial data provided.
`<http://elasticsearch.org/guide/reference/api/update/>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: Document ID
:arg body: The request definition using either `script` or partial `doc`
:arg consistency: Explicit write consistency setting for the operation
:arg fields: A comma-separated list of fields to return in the response
:arg lang: The script language (default: mvel)
:arg parent: ID of the parent document
:arg percolate: Perform percolation during the operation; use specific
registered query name, attribute, or wildcard
:arg refresh: Refresh the index after performing the operation
:arg replication: Specific replication type (default: sync)
:arg retry_on_conflict: Specify how many times should the operation be
retried when a conflict occurs (default: 0)
:arg routing: Specific routing value
:arg script: The URL-encoded script definition (instead of using
request body)
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
        :arg version_type: Specific version type
"""
_, data = yield self.transport.perform_request('POST',
_make_path(index,
doc_type, id,
'_update'),
params=params, body=body)
raise gen.Return(data)
@gen.coroutine
@query_params('_source', '_source_exclude', '_source_include',
'analyze_wildcard', 'analyzer', 'default_operator', 'df',
'explain', 'fields', 'ignore_indices', 'indices_boost',
'lenient', 'lowercase_expanded_terms', 'from_', 'preference',
'q', 'routing', 'scroll', 'search_type', 'size', 'sort',
'source', 'stats', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'timeout', 'version')
def search(self, index=None, doc_type=None, body=None, params=None):
"""
Execute a search query and get back search hits that match the query.
`<http://www.elasticsearch.org/guide/reference/api/search/>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg doc_type: A comma-separated list of document types to search;
leave empty to perform the operation on all types
:arg body: The search definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string query (AND
or OR) (default: OR)
:arg df: The field to use as default where no field prefix is given in
the query string
:arg explain: Specify whether to return detailed information about
score computation as part of a hit
:arg fields: A comma-separated list of fields to return as part of a hit
:arg ignore_indices: When performed on multiple indices, allows to
ignore `missing` ones (default: none)
:arg indices_boost: Comma-separated list of index boosts
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg from_: Starting offset (default: 0)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg search_type: Search operation type
:arg size: Number of hits to return (default: 10)
:arg sort: A comma-separated list of <field>:<direction> pairs
:arg source: The URL-encoded request definition using the Query DSL
(instead of using request body)
:arg stats: Specific 'tag' of the request for logging and statistical
purposes
:arg suggest_field: Specify which field to use for suggestions
:arg suggest_mode: Specify suggest mode (default: missing)
:arg suggest_size: How many suggestions to return in response
:arg suggest_text: The source text for which the suggestions should be
returned
:arg timeout: Explicit operation timeout
:arg version: Specify whether to return document version as part of a
hit
"""
# from is a reserved word so it cannot be used, use from_ instead
if 'from_' in params:
params['from'] = params.pop('from_')
if doc_type and not index:
index = '_all'
_, data = yield self.transport.perform_request('GET',
_make_path(index,
doc_type,
'_search'),
params=params,
body=body)
raise gen.Return(data)
@gen.coroutine
@query_params('_source', '_source_exclude', '_source_include',
'analyze_wildcard', 'analyzer', 'default_operator',
'df', 'fields', 'lenient', 'lowercase_expanded_terms',
'parent', 'preference', 'q', 'routing', 'source')
def explain(self, index, doc_type, id, body=None, params=None):
"""
The explain api computes a score explanation for a query and a specific
document. This can give useful feedback whether a document matches or
didn't match a specific query.
`<http://elasticsearch.org/guide/reference/api/explain/>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: The document ID
:arg body: The query definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg analyze_wildcard: Specify whether wildcards and prefix queries in
the query string query should be analyzed (default: false)
:arg analyzer: The analyzer for the query string query
:arg default_operator: The default operator for query string query (AND
or OR), (default: OR)
:arg df: The default field for query string query (default: _all)
:arg fields: A comma-separated list of fields to return in the response
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
:arg source: The URL-encoded query definition (instead of using the
request body)
"""
_, data = yield self.transport.perform_request('GET',
_make_path(index,
doc_type, id,
'_explain'),
params=params, body=body)
raise gen.Return(data)
@gen.coroutine
@query_params()
def scroll(self, scroll_id, scroll, params=None):
"""
Scroll a search request created by specifying the scroll parameter.
`<http://www.elasticsearch.org/guide/reference/api/search/scroll/>`_
:arg scroll_id: The scroll ID
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
"""
body = {
"scroll": scroll,
"scroll_id": scroll_id
}
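        # scroll and scroll_id are sent in the request body above, so strip any
        # duplicates from the query-string params to avoid sending them twice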
if params:
if "scroll" in params.keys():
params.pop("scroll")
if "scroll_id" in params.keys():
params.pop("scroll_id")
_, data = yield self.transport.perform_request('POST',
_make_path('_search',
'scroll'),
body=body,
params=params)
raise gen.Return(data)
@gen.coroutine
@query_params()
def clear_scroll(self, scroll_id, params=None):
"""
Clear the scroll request created by specifying the scroll parameter to
search.
`<http://www.elasticsearch.org/guide/reference/api/search/scroll/>`_
:arg scroll_id: The scroll ID or a list of scroll IDs
"""
if not isinstance(scroll_id, list):
scroll_id = [scroll_id]
body = {
"scroll_id": scroll_id
}
if params and "scroll_id" in params.keys():
params.pop("scroll_id")
_, data = yield self.transport.perform_request('DELETE',
_make_path('_search',
'scroll'),
body=body,
params=params)
raise gen.Return(data)
@gen.coroutine
@query_params('consistency', 'parent', 'refresh', 'replication', 'routing',
'timeout', 'version', 'version_type')
def delete(self, index, doc_type, id, params=None):
"""
Delete a typed JSON document from a specific index based on its id.
`<http://elasticsearch.org/guide/reference/api/delete/>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: The document ID
:arg consistency: Specific write consistency setting for the operation
:arg parent: ID of parent document
:arg refresh: Refresh the index after performing the operation
:arg replication: Specific replication type (default: sync)
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type
"""
_, data = yield self.transport.perform_request('DELETE',
_make_path(index,
doc_type, id),
params=params)
raise gen.Return(data)
@gen.coroutine
@query_params('ignore_indices', 'min_score', 'preference', 'routing',
'source')
def count(self, index=None, doc_type=None, body=None, params=None):
"""
Execute a query and get the number of matches for that query.
`<http://elasticsearch.org/guide/reference/api/count/>`_
:arg index: A comma-separated list of indices to restrict the results
:arg doc_type: A comma-separated list of types to restrict the results
:arg body: A query to restrict the results (optional)
:arg ignore_indices: When performed on multiple indices, allows to
ignore `missing` ones (default: none)
:arg min_score: Include only documents with a specific `_score` value
in the result
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: Specific routing value
:arg source: The URL-encoded query definition (instead of using the
request body)
"""
_, data = yield self.transport.perform_request('POST',
_make_path(index,
doc_type,
'_count'),
params=params, body=body)
raise gen.Return(data)
@gen.coroutine
@query_params('consistency', 'refresh', 'replication')
def bulk(self, body, index=None, doc_type=None, params=None):
"""
Perform many index/delete operations in a single API call.
`<http://elasticsearch.org/guide/reference/api/bulk/>`_
See the :func:`~elasticsearch.helpers.bulk_index` for a more friendly
API.
:arg body: The operation definition and data (action-data pairs)
:arg index: Default index for items which don't provide one
:arg doc_type: Default document type for items which don't provide one
:arg consistency: Explicit write consistency setting for the operation
:arg refresh: Refresh the index after performing the operation
        :arg replication: Explicitly set the replication type (default: sync)
"""
_, data = yield self.transport.perform_request('POST',
_make_path(index,
doc_type,
'_bulk'),
params=params,
body=self._bulk_body(body))
raise gen.Return(data)
@gen.coroutine
@query_params('search_type')
def msearch(self, body, index=None, doc_type=None, params=None):
"""
Execute several search requests within the same API.
`<http://www.elasticsearch.org/guide/reference/api/multi-search/>`_
:arg body: The request definitions (metadata-search request definition
pairs), separated by newlines
:arg index: A comma-separated list of index names to use as default
:arg doc_type: A comma-separated list of document types to use as default
:arg search_type: Search operation type
"""
_, data = yield self.transport.perform_request('GET',
_make_path(index,
doc_type,
'_msearch'),
params=params,
body=self._bulk_body(body))
raise gen.Return(data)
@gen.coroutine
@query_params('consistency', 'ignore_indices', 'replication', 'routing',
'source', 'timeout', 'q')
def delete_by_query(self, index, doc_type=None, body=None, params=None):
"""
Delete documents from one or more indices and one or more types based
on a query.
`<http://www.elasticsearch.org/guide/reference/api/delete-by-query/>`_
:arg index: A comma-separated list of indices to restrict the operation
:arg doc_type: A comma-separated list of types to restrict the operation
:arg body: A query to restrict the operation
:arg consistency: Specific write consistency setting for the operation
:arg ignore_indices: When performed on multiple indices, allows to
ignore `missing` ones (default: none)
:arg replication: Specific replication type (default: sync)
:arg routing: Specific routing value
:arg source: The URL-encoded query definition (instead of using the
request body)
:arg q: Query in the Lucene query string syntax
:arg timeout: Explicit operation timeout
"""
_, data = yield self.transport.perform_request('DELETE',
_make_path(index,
doc_type,
'_query'),
params=params, body=body)
raise gen.Return(data)
@gen.coroutine
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'local')
def get_mapping(self, index=None, doc_type=None, params=None):
"""
Retrieve mapping definition of index or index/type.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html>`_
:arg index: A comma-separated list of index names
:arg doc_type: A comma-separated list of document types
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state from
master node (default: false)
"""
_, data = yield self.transport.perform_request('GET',
_make_path(index,
'_mapping',
doc_type),
params=params)
raise gen.Return(data)
@gen.coroutine
@query_params('ignore_indices', 'preference', 'routing', 'source')
def suggest(self, index=None, body=None, params=None):
"""
The suggest feature suggests similar looking terms based on a provided
text by using a suggester.
`<http://elasticsearch.org/guide/reference/api/search/suggest/>`_
:arg index: A comma-separated list of index names to restrict the
operation; use `_all` or empty string to perform the operation on
all indices
:arg body: The request definition
:arg ignore_indices: When performed on multiple indices, allows to
ignore `missing` ones (default: none)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: Specific routing value
:arg source: The URL-encoded request definition (instead of using
request body)
"""
_, data = yield self.transport.perform_request('POST',
_make_path(index,
'_suggest'),
params=params, body=body)
raise gen.Return(data)
@gen.coroutine
@query_params('prefer_local')
def percolate(self, index, doc_type, body, params=None):
"""
Send a percolate request which include a doc, and get back the queries
that match on that doc out of the set of registered queries.
`<http://elasticsearch.org/guide/reference/api/percolate/>`_
:arg index: The name of the index with a registered percolator query
:arg doc_type: The document type
:arg body: The document (`doc`) to percolate against registered queries;
optionally also a `query` to limit the percolation to specific
registered queries
:arg prefer_local: With `true`, specify that a local shard should be
used if available, with `false`, use a random shard (default: true)
"""
_, data = yield self.transport.perform_request('GET',
_make_path(index,
doc_type,
'_percolate'),
params=params, body=body)
raise gen.Return(data)
@gen.coroutine
@query_params('boost_terms', 'max_doc_freq', 'max_query_terms',
'max_word_len', 'min_doc_freq', 'min_term_freq',
'min_word_len', 'mlt_fields', 'percent_terms_to_match',
'routing', 'search_from', 'search_indices',
'search_query_hint', 'search_scroll', 'search_size',
'search_source', 'search_type', 'search_types', 'stop_words')
def mlt(self, index, doc_type, id, body=None, params=None):
"""
Get documents that are "like" a specified document.
`<http://elasticsearch.org/guide/reference/api/more-like-this/>`_
:arg index: The name of the index
:arg doc_type: The type of the document (use `_all` to fetch the first
document matching the ID across all types)
:arg id: The document ID
:arg body: A specific search request definition
:arg boost_terms: The boost factor
:arg max_doc_freq: The word occurrence frequency as count: words with
higher occurrence in the corpus will be ignored
:arg max_query_terms: The maximum query terms to be included in the
generated query
        :arg max_word_len: The maximum length of the word: longer words will
            be ignored
:arg min_doc_freq: The word occurrence frequency as count: words with
lower occurrence in the corpus will be ignored
:arg min_term_freq: The term frequency as percent: terms with lower
occurrence in the source document will be ignored
:arg min_word_len: The minimum length of the word: shorter words will
be ignored
:arg mlt_fields: Specific fields to perform the query against
:arg percent_terms_to_match: How many terms have to match in order to
consider the document a match (default: 0.3)
:arg routing: Specific routing value
:arg search_from: The offset from which to return results
:arg search_indices: A comma-separated list of indices to perform the
query against (default: the index containing the document)
:arg search_query_hint: The search query hint
:arg search_scroll: A scroll search request definition
:arg search_size: The number of documents to return (default: 10)
:arg search_source: A specific search request definition (instead of
using the request body)
:arg search_type: Specific search type (eg. `dfs_then_fetch`, `count`,
etc)
:arg search_types: A comma-separated list of types to perform the query
against (default: the same type as the document)
:arg stop_words: A list of stop words to be ignored
"""
_, data = yield self.transport.perform_request(
'GET', _make_path(index, doc_type, id, '_mlt'),
params=params, body=body)
raise gen.Return(data)
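# Hedged usage sketch appended for illustration; it is not part of the original
# module.  It shows how the coroutine-based AsyncElasticsearch client defined
# above might be driven from a Tornado IOLoop.  The host, index and document
# values below are hypothetical placeholders.
if __name__ == '__main__':
    from tornado import ioloop
    @gen.coroutine
    def _demo():
        es = AsyncElasticsearch(hosts=['http://localhost:9200'])
        alive = yield es.ping()
        if alive:
            yield es.index('example-index', 'example-doc',
                           {'title': 'hello world'})
            result = yield es.search(index='example-index',
                                     body={'query': {'match_all': {}}})
            raise gen.Return(result)
    ioloop.IOLoop.current().run_sync(_demo)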
|
gmr/tornado-elasticsearch
|
tornado_elasticsearch.py
|
Python
|
bsd-3-clause
| 46,402
| 0.000388
|
from pulp.bindings import auth, consumer, consumer_groups, repo_groups, repository
from pulp.bindings.actions import ActionsAPI
from pulp.bindings.content import OrphanContentAPI, ContentSourceAPI, ContentCatalogAPI
from pulp.bindings.event_listeners import EventListenerAPI
from pulp.bindings.server_info import ServerInfoAPI, ServerStatusAPI
from pulp.bindings.tasks import TasksAPI, TaskSearchAPI
from pulp.bindings.upload import UploadAPI
class Bindings(object):
def __init__(self, pulp_connection):
"""
        @type pulp_connection: pulp.bindings.server.PulpConnection
"""
# Please keep the following in alphabetical order to ease reading
self.actions = ActionsAPI(pulp_connection)
self.bind = consumer.BindingsAPI(pulp_connection)
self.bindings = consumer.BindingSearchAPI(pulp_connection)
self.profile = consumer.ProfilesAPI(pulp_connection)
self.consumer = consumer.ConsumerAPI(pulp_connection)
self.consumer_content = consumer.ConsumerContentAPI(pulp_connection)
self.consumer_content_schedules = consumer.ConsumerContentSchedulesAPI(pulp_connection)
self.consumer_group = consumer_groups.ConsumerGroupAPI(pulp_connection)
self.consumer_group_search = consumer_groups.ConsumerGroupSearchAPI(pulp_connection)
self.consumer_group_actions = consumer_groups.ConsumerGroupActionAPI(pulp_connection)
self.consumer_group_bind = consumer_groups.ConsumerGroupBindAPI(pulp_connection)
self.consumer_group_content = consumer_groups.ConsumerGroupContentAPI(pulp_connection)
self.consumer_history = consumer.ConsumerHistoryAPI(pulp_connection)
self.consumer_search = consumer.ConsumerSearchAPI(pulp_connection)
self.content_orphan = OrphanContentAPI(pulp_connection)
self.content_source = ContentSourceAPI(pulp_connection)
self.content_catalog = ContentCatalogAPI(pulp_connection)
self.event_listener = EventListenerAPI(pulp_connection)
self.permission = auth.PermissionAPI(pulp_connection)
self.repo = repository.RepositoryAPI(pulp_connection)
self.repo_actions = repository.RepositoryActionsAPI(pulp_connection)
self.repo_distributor = repository.RepositoryDistributorAPI(pulp_connection)
self.repo_group = repo_groups.RepoGroupAPI(pulp_connection)
self.repo_group_actions = repo_groups.RepoGroupActionAPI(pulp_connection)
self.repo_group_distributor = repo_groups.RepoGroupDistributorAPI(pulp_connection)
self.repo_group_distributor_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_group_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_history = repository.RepositoryHistoryAPI(pulp_connection)
self.repo_importer = repository.RepositoryImporterAPI(pulp_connection)
self.repo_publish_schedules = repository.RepositoryPublishSchedulesAPI(pulp_connection)
self.repo_search = repository.RepositorySearchAPI(pulp_connection)
self.repo_sync_schedules = repository.RepositorySyncSchedulesAPI(pulp_connection)
self.repo_unit = repository.RepositoryUnitAPI(pulp_connection)
self.role = auth.RoleAPI(pulp_connection)
self.server_info = ServerInfoAPI(pulp_connection)
self.server_status = ServerStatusAPI(pulp_connection)
self.tasks = TasksAPI(pulp_connection)
self.tasks_search = TaskSearchAPI(pulp_connection)
self.uploads = UploadAPI(pulp_connection)
self.user = auth.UserAPI(pulp_connection)
self.user_search = auth.UserSearchAPI(pulp_connection)
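# Hedged usage sketch (illustrative only; not part of the original file).  It
# shows how the Bindings aggregate above is typically constructed.  The host
# below is a placeholder; see pulp.bindings.server.PulpConnection for the
# actual constructor options.
if __name__ == '__main__':
    from pulp.bindings.server import PulpConnection
    connection = PulpConnection('localhost.example.com')  # hypothetical server
    bindings = Bindings(connection)
    # each attribute now wraps one slice of the REST API, e.g. bindings.repo,
    # bindings.tasks and bindings.user_search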
|
rbramwell/pulp
|
bindings/pulp/bindings/bindings.py
|
Python
|
gpl-2.0
| 3,641
| 0.003845
|
import mock
from django.utils import timezone
from rest_framework.test import APIRequestFactory
from elections.api.next.api_views import BallotViewSet
class TestBallotViewSet:
def test_get_queryset_last_updated_ordered_by_modified(self):
factory = APIRequestFactory()
timestamp = timezone.now().isoformat()
request = factory.get("/next/ballots/", {"last_updated": timestamp})
request.query_params = request.GET
view = BallotViewSet(request=request)
view.queryset = mock.MagicMock()
view.get_queryset()
view.queryset.with_last_updated.assert_called_once()
def test_get_queryset_last_updated_not_ordered(self):
factory = APIRequestFactory()
request = factory.get("/next/ballots/")
request.query_params = request.GET
view = BallotViewSet(request=request)
view.queryset = mock.MagicMock()
view.get_queryset()
view.queryset.with_last_updated.assert_not_called()
|
DemocracyClub/yournextrepresentative
|
ynr/apps/elections/tests/test_viewsets.py
|
Python
|
agpl-3.0
| 994
| 0
|
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('WebPage')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import os
import web
from viz import Viz
import WebPage
import WebHandler
class WebHandlerDyn(WebHandler.WebHandler):
def getPage(self,subResource,username):
return self.getPageDyn(dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def getData(self,subResource,username):
return self.getDataDyn(dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def postData(self,receivedData,subResource,username):
return self.postDataDyn(receivedData=receivedData,
dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def getDynPath(self):
elems = WebPage.WebPage.urlStringTolist(web.ctx.path)
for e in elems:
if e.startswith('_'):
return e[1:]
class WebPageDyn(WebPage.WebPage):
def __init__(self,subPageLister=None,
subPageHandler=None,
**fvars):
assert callable(subPageLister)
# store params
self.subPageLister = subPageLister
self.subPageHandler = subPageHandler
# initialize parent class
WebPage.WebPage.__init__(self,**fvars)
# register subPageHandler
self.registerPage(WebPage.WebPage(webServer = self.webServer,
url = '_[.%%\w-]*',
title = '',
webHandler = self.subPageHandler))
def getUrlHierarchy(self,parentPath=[]):
# run the parent class' function
returnVal = WebPage.WebPage.getUrlHierarchy(self,parentPath)
# modify the children
returnVal['children'] = []
for sub in self.subPageLister():
classUrl = parentPath+[self.url]+[sub['url']]
if len(classUrl) and not classUrl[0]:
classUrl = classUrl[1:]
returnVal['children'] += [
{
'url': self.urlListToString(parentPath+[self.url]+['_'+sub['url']]),
'title': sub['title'],
'class': self.webServer.getDocumentation().getClass(classUrl),
'children': [],
}
]
return returnVal
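# Hedged illustration (not part of the original module): WebPageDyn expects its
# subPageLister callable to return a list of dicts with 'url' and 'title' keys,
# which getUrlHierarchy() above turns into child pages.  The entries below are
# hypothetical.
def exampleSubPageLister():
    return [
        {'url': 'mote1', 'title': 'Mote 1'},
        {'url': 'mote2', 'title': 'Mote 2'},
    ]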
|
twatteyne/dustlink_academy
|
views/web/dustWeb/WebPageDyn.py
|
Python
|
bsd-3-clause
| 2,973
| 0.019509
|
# REST API Backend for the Radiocontrol Project
#
# Copyright (C) 2017 Stefan Derkits <stefan@derkits.at>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
# Register your models here.
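# Hedged example (not part of the original file): a registration would normally
# look like the commented lines below; the model name is hypothetical because
# this project's models are not shown here.
#
# from .models import ScheduleEntry
# admin.site.register(ScheduleEntry)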
|
Horrendus/radiocontrol
|
api/api/admin.py
|
Python
|
agpl-3.0
| 820
| 0
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test # noqa
from tempest_lib.common.utils import data_utils # noqa
from tempest_lib import exceptions as lib_exc # noqa
from manila_tempest_tests import clients_share as clients
from manila_tempest_tests.tests.api import base
class ShareTypesAdminNegativeTest(base.BaseSharesAdminTest):
def _create_share_type(self):
name = data_utils.rand_name("unique_st_name")
extra_specs = self.add_required_extra_specs_to_dict({"key": "value"})
return self.create_share_type(name, extra_specs=extra_specs)
@classmethod
def resource_setup(cls):
super(ShareTypesAdminNegativeTest, cls).resource_setup()
cls.member_shares_client = clients.Manager().shares_client
@test.attr(type=["gate", "smoke", ])
def test_create_share_with_nonexistent_share_type(self):
self.assertRaises(lib_exc.NotFound,
self.create_share,
share_type_id=data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_create_share_type_with_empty_name(self):
self.assertRaises(lib_exc.BadRequest, self.create_share_type, '')
@test.attr(type=["gate", "smoke", ])
def test_create_share_type_with_too_big_name(self):
self.assertRaises(lib_exc.BadRequest,
self.create_share_type,
"x" * 256)
@test.attr(type=["gate", "smoke", ])
def test_get_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.get_share_type,
data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_delete_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.delete_share_type,
data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_create_duplicate_of_share_type(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.create_share_type,
st["share_type"]["name"],
extra_specs=self.add_required_extra_specs_to_dict())
@test.attr(type=["gate", "smoke", ])
def test_add_share_type_allowed_for_public(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.shares_client.add_access_to_share_type,
st["share_type"]["id"],
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_remove_share_type_allowed_for_public(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.shares_client.remove_access_from_share_type,
st["share_type"]["id"],
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_add_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.add_access_to_share_type,
data_utils.rand_name("fake"),
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_remove_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.remove_access_from_share_type,
data_utils.rand_name("fake"),
self.shares_client.tenant_id)
|
scality/manila
|
manila_tempest_tests/tests/api/admin/test_share_types_negative.py
|
Python
|
apache-2.0
| 4,282
| 0
|
"""
Tests for CourseData utility class.
"""
from __future__ import absolute_import
import six
from mock import patch
from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..course_data import CourseData
class CourseDataTest(ModuleStoreTestCase):
"""
Simple tests to ensure CourseData works as advertised.
"""
def setUp(self):
super(CourseDataTest, self).setUp()
with self.store.default_store(ModuleStoreEnum.Type.split):
self.course = CourseFactory.create()
# need to re-retrieve the course since the version on the original course isn't accurate.
self.course = self.store.get_course(self.course.id)
self.user = UserFactory.create()
self.collected_structure = get_course_in_cache(self.course.id)
self.one_true_structure = get_course_blocks(
self.user, self.course.location, collected_block_structure=self.collected_structure,
)
self.expected_results = {
'course': self.course,
'collected_block_structure': self.collected_structure,
'structure': self.one_true_structure,
'course_key': self.course.id,
'location': self.course.location,
}
@patch('lms.djangoapps.grades.course_data.get_course_blocks')
def test_fill_course_data(self, mock_get_blocks):
"""
Tests to ensure that course data is fully filled with just a single input.
"""
mock_get_blocks.return_value = self.one_true_structure
for kwarg in self.expected_results: # We iterate instead of ddt due to dependence on 'self'
if kwarg == 'location':
continue # This property is purely output; it's never able to be used as input
kwargs = {kwarg: self.expected_results[kwarg]}
course_data = CourseData(self.user, **kwargs)
for arg in self.expected_results:
# No point validating the data we used as input, and c_b_s is input-only
if arg != kwarg and arg != "collected_block_structure":
expected = self.expected_results[arg]
actual = getattr(course_data, arg)
self.assertEqual(expected, actual)
def test_properties(self):
expected_edited_on = getattr(
self.one_true_structure[self.one_true_structure.root_block_usage_key],
'subtree_edited_on',
)
for kwargs in [
dict(course=self.course),
dict(collected_block_structure=self.one_true_structure),
dict(structure=self.one_true_structure),
dict(course_key=self.course.id),
]:
course_data = CourseData(self.user, **kwargs)
self.assertEquals(course_data.course_key, self.course.id)
self.assertEquals(course_data.location, self.course.location)
self.assertEquals(course_data.structure.root_block_usage_key, self.one_true_structure.root_block_usage_key)
self.assertEquals(course_data.course.id, self.course.id)
self.assertEquals(course_data.version, self.course.course_version)
self.assertEquals(course_data.edited_on, expected_edited_on)
self.assertIn(u'Course: course_key', six.text_type(course_data))
self.assertIn(u'Course: course_key', course_data.full_string())
def test_no_data(self):
with self.assertRaises(ValueError):
_ = CourseData(self.user)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_full_string(self):
empty_structure = get_course_blocks(self.user, self.course.location)
self.assertFalse(empty_structure)
# full_string retrieves value from collected_structure when structure is empty.
course_data = CourseData(
self.user, structure=empty_structure, collected_block_structure=self.collected_structure,
)
self.assertIn(u'Course: course_key: {}, version:'.format(self.course.id), course_data.full_string())
# full_string returns minimal value when structures aren't readily available.
course_data = CourseData(self.user, course_key=self.course.id)
self.assertIn(u'empty course structure', course_data.full_string())
|
ESOedX/edx-platform
|
lms/djangoapps/grades/tests/test_course_data.py
|
Python
|
agpl-3.0
| 4,628
| 0.003025
|
# Copyright 2014 Sebastien Maccagnoni-Munch
#
# This file is part of Calaos Web Installer.
#
# Calaos Web Installer is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Calaos Web Installer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Calaos Web Installer. If not, see <http://www.gnu.org/licenses/>.
import os.path
import pickle
class CalaosApi:
def __init__(self, io, rules):
self.io_path = io
self.rules_path = rules
self.readfiles()
def readfiles(self):
if os.path.exists(self.io_path):
self.io = pickle.load(file(self.io_path))
else:
self.io = []
if os.path.exists(self.rules_path):
self.rules = pickle.load(file(self.rules_path))
else:
self.rules = []
def get_config(self):
return {
'io': self.io,
'rules': self.rules
}
def writefiles(self):
pickle.dump(self.io, file(self.io_path, 'w'))
pickle.dump(self.rules, file(self.rules_path, 'w'))
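# Hedged usage sketch (illustrative; not part of the original file).  The
# pickle paths are placeholders, and the sketch assumes a Python 2 interpreter
# because the class above relies on the Python 2 file() builtin.
if __name__ == '__main__':
    api = CalaosApi('/tmp/calaos_io.pickle', '/tmp/calaos_rules.pickle')
    print api.get_config()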
|
tiramiseb/abandoned_calaos-web-installer
|
calaosapi.py
|
Python
|
agpl-3.0
| 1,520
| 0.001974
|
"""
Deployment file to facilitate releases of pymatgen.
Note that this file is meant to be run from the root directory of the pymatgen
repo.
"""
__author__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Sep 1, 2014"
import glob
import os
import json
import webbrowser
import requests
import re
import subprocess
from fabric.api import local, lcd
from pymatgen import __version__ as ver
def make_doc():
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split("\-{3,}", contents)
n = len(toks[0].split()[-1])
changes = [toks[0]]
changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
changes = ("-" * n).join(changes)
with open("docs/latest_changes.rst", "w") as f:
f.write(changes)
with lcd("examples"):
local("ipython nbconvert --to html *.ipynb")
local("mv *.html ../docs/_static")
with lcd("docs"):
local("cp ../CHANGES.rst change_log.rst")
local("sphinx-apidoc -d 6 -o . -f ../pymatgen")
local("rm pymatgen.*.tests.rst")
for f in glob.glob("docs/*.rst"):
if f.startswith('docs/pymatgen') and f.endswith('rst'):
newoutput = []
suboutput = []
subpackage = False
with open(f, 'r') as fid:
for line in fid:
clean = line.strip()
if clean == "Subpackages":
subpackage = True
if not subpackage and not clean.endswith("tests"):
newoutput.append(line)
else:
if not clean.endswith("tests"):
suboutput.append(line)
if clean.startswith("pymatgen") and not clean.endswith("tests"):
newoutput.extend(suboutput)
subpackage = False
suboutput = []
with open(f, 'w') as fid:
fid.write("".join(newoutput))
local("make html")
local("cp _static/* _build/html/_static")
        # This makes sure pymatgen.org works to redirect to the GitHub page
local("echo \"pymatgen.org\" > _build/html/CNAME")
        # Avoid the use of jekyll so that _dir works as intended.
local("touch _build/html/.nojekyll")
def publish():
local("python setup.py release")
def setver():
local("sed s/version=.*,/version=\\\"{}\\\",/ setup.py > newsetup"
.format(ver))
local("mv newsetup setup.py")
def update_doc():
make_doc()
with lcd("docs/_build/html/"):
local("git add .")
local("git commit -a -m \"Update dev docs\"")
local("git push origin gh-pages")
def merge_stable():
local("git commit -a -m \"v%s release\"" % ver)
local("git push")
local("git checkout stable")
local("git pull")
local("git merge master")
local("git push")
local("git checkout master")
def release_github():
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split("\-+", contents)
desc = toks[1].strip()
payload = {
"tag_name": "v" + ver,
"target_commitish": "master",
"name": "v" + ver,
"body": desc,
"draft": False,
"prerelease": False
}
response = requests.post(
"https://api.github.com/repos/materialsproject/pymatgen/releases",
data=json.dumps(payload),
headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]})
print response.text
def update_changelog():
output = subprocess.check_output(["git", "log", "--pretty=format:%s",
"v%s..HEAD" % ver])
lines = ["* " + l for l in output.strip().split("\n")]
with open("CHANGES.rst") as f:
contents = f.read()
toks = contents.split("==========")
toks.insert(-1, "\n\n" + "\n".join(lines))
with open("CHANGES.rst", "w") as f:
f.write("==========".join(toks))
def log_ver():
filepath = os.path.join(os.environ["HOME"], "Dropbox", "Public",
"pymatgen", ver)
with open(filepath, "w") as f:
f.write("Release")
def release(skip_test=False):
setver()
if not skip_test:
local("nosetests")
publish()
log_ver()
update_doc()
merge_stable()
release_github()
def open_doc():
pth = os.path.abspath("docs/_build/html/index.html")
webbrowser.open("file://" + pth)
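# Hedged usage note (not part of the original fabfile): with Fabric 1.x the
# functions above are exposed as tasks and are normally invoked through the fab
# CLI from the repository root, e.g. "fab make_doc", "fab update_doc", or
# "fab release:skip_test=True".  The exact invocations are assumptions based on
# standard Fabric conventions.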
|
yanikou19/pymatgen
|
fabfile.py
|
Python
|
mit
| 4,544
| 0.001761
|
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.is_.forms import (ISIdNumberField, ISPhoneNumberField,
ISPostalCodeSelect)
class ISLocalFlavorTests(SimpleTestCase):
def test_ISPostalCodeSelect(self):
f = ISPostalCodeSelect()
out = '''<select name="foo">
<option value="101">101 Reykjav\xedk</option>
<option value="103">103 Reykjav\xedk</option>
<option value="104">104 Reykjav\xedk</option>
<option value="105">105 Reykjav\xedk</option>
<option value="107">107 Reykjav\xedk</option>
<option value="108">108 Reykjav\xedk</option>
<option value="109">109 Reykjav\xedk</option>
<option value="110">110 Reykjav\xedk</option>
<option value="111">111 Reykjav\xedk</option>
<option value="112">112 Reykjav\xedk</option>
<option value="113">113 Reykjav\xedk</option>
<option value="116">116 Kjalarnes</option>
<option value="121">121 Reykjav\xedk</option>
<option value="123">123 Reykjav\xedk</option>
<option value="124">124 Reykjav\xedk</option>
<option value="125">125 Reykjav\xedk</option>
<option value="127">127 Reykjav\xedk</option>
<option value="128">128 Reykjav\xedk</option>
<option value="129">129 Reykjav\xedk</option>
<option value="130">130 Reykjav\xedk</option>
<option value="132">132 Reykjav\xedk</option>
<option value="150">150 Reykjav\xedk</option>
<option value="155">155 Reykjav\xedk</option>
<option value="170">170 Seltjarnarnes</option>
<option value="172">172 Seltjarnarnes</option>
<option value="190">190 Vogar</option>
<option value="200">200 K\xf3pavogur</option>
<option value="201">201 K\xf3pavogur</option>
<option value="202">202 K\xf3pavogur</option>
<option value="203">203 K\xf3pavogur</option>
<option value="210">210 Gar\xf0ab\xe6r</option>
<option value="212">212 Gar\xf0ab\xe6r</option>
<option value="220">220 Hafnarfj\xf6r\xf0ur</option>
<option value="221">221 Hafnarfj\xf6r\xf0ur</option>
<option value="222">222 Hafnarfj\xf6r\xf0ur</option>
<option value="225">225 \xc1lftanes</option>
<option value="230">230 Reykjanesb\xe6r</option>
<option value="232">232 Reykjanesb\xe6r</option>
<option value="233">233 Reykjanesb\xe6r</option>
<option value="235">235 Keflav\xedkurflugv\xf6llur</option>
<option value="240">240 Grindav\xedk</option>
<option value="245">245 Sandger\xf0i</option>
<option value="250">250 Gar\xf0ur</option>
<option value="260">260 Reykjanesb\xe6r</option>
<option value="270">270 Mosfellsb\xe6r</option>
<option value="271">271 Mosfellsb\xe6r</option>
<option value="276">276 Mosfellsb\xe6r</option>
<option value="300">300 Akranes</option>
<option value="301">301 Akranes</option>
<option value="302">302 Akranes</option>
<option value="310">310 Borgarnes</option>
<option value="311">311 Borgarnes</option>
<option value="320">320 Reykholt \xed Borgarfir\xf0i</option>
<option value="340">340 Stykkish\xf3lmur</option>
<option value="345">345 Flatey \xe1 Brei\xf0afir\xf0i</option>
<option value="350">350 Grundarfj\xf6r\xf0ur</option>
<option value="355">355 \xd3lafsv\xedk</option>
<option value="356">356 Sn\xe6fellsb\xe6r</option>
<option value="360">360 Hellissandur</option>
<option value="370">370 B\xfa\xf0ardalur</option>
<option value="371">371 B\xfa\xf0ardalur</option>
<option value="380">380 Reykh\xf3lahreppur</option>
<option value="400">400 \xcdsafj\xf6r\xf0ur</option>
<option value="401">401 \xcdsafj\xf6r\xf0ur</option>
<option value="410">410 Hn\xedfsdalur</option>
<option value="415">415 Bolungarv\xedk</option>
<option value="420">420 S\xfa\xf0av\xedk</option>
<option value="425">425 Flateyri</option>
<option value="430">430 Su\xf0ureyri</option>
<option value="450">450 Patreksfj\xf6r\xf0ur</option>
<option value="451">451 Patreksfj\xf6r\xf0ur</option>
<option value="460">460 T\xe1lknafj\xf6r\xf0ur</option>
<option value="465">465 B\xedldudalur</option>
<option value="470">470 \xdeingeyri</option>
<option value="471">471 \xdeingeyri</option>
<option value="500">500 Sta\xf0ur</option>
<option value="510">510 H\xf3lmav\xedk</option>
<option value="512">512 H\xf3lmav\xedk</option>
<option value="520">520 Drangsnes</option>
<option value="522">522 Kj\xf6rvogur</option>
<option value="523">523 B\xe6r</option>
<option value="524">524 Nor\xf0urfj\xf6r\xf0ur</option>
<option value="530">530 Hvammstangi</option>
<option value="531">531 Hvammstangi</option>
<option value="540">540 Bl\xf6ndu\xf3s</option>
<option value="541">541 Bl\xf6ndu\xf3s</option>
<option value="545">545 Skagastr\xf6nd</option>
<option value="550">550 Sau\xf0\xe1rkr\xf3kur</option>
<option value="551">551 Sau\xf0\xe1rkr\xf3kur</option>
<option value="560">560 Varmahl\xed\xf0</option>
<option value="565">565 Hofs\xf3s</option>
<option value="566">566 Hofs\xf3s</option>
<option value="570">570 Flj\xf3t</option>
<option value="580">580 Siglufj\xf6r\xf0ur</option>
<option value="600">600 Akureyri</option>
<option value="601">601 Akureyri</option>
<option value="602">602 Akureyri</option>
<option value="603">603 Akureyri</option>
<option value="610">610 Greniv\xedk</option>
<option value="611">611 Gr\xedmsey</option>
<option value="620">620 Dalv\xedk</option>
<option value="621">621 Dalv\xedk</option>
<option value="625">625 \xd3lafsfj\xf6r\xf0ur</option>
<option value="630">630 Hr\xedsey</option>
<option value="640">640 H\xfasav\xedk</option>
<option value="641">641 H\xfasav\xedk</option>
<option value="645">645 Fossh\xf3ll</option>
<option value="650">650 Laugar</option>
<option value="660">660 M\xfdvatn</option>
<option value="670">670 K\xf3pasker</option>
<option value="671">671 K\xf3pasker</option>
<option value="675">675 Raufarh\xf6fn</option>
<option value="680">680 \xde\xf3rsh\xf6fn</option>
<option value="681">681 \xde\xf3rsh\xf6fn</option>
<option value="685">685 Bakkafj\xf6r\xf0ur</option>
<option value="690">690 Vopnafj\xf6r\xf0ur</option>
<option value="700">700 Egilssta\xf0ir</option>
<option value="701">701 Egilssta\xf0ir</option>
<option value="710">710 Sey\xf0isfj\xf6r\xf0ur</option>
<option value="715">715 Mj\xf3ifj\xf6r\xf0ur</option>
<option value="720">720 Borgarfj\xf6r\xf0ur eystri</option>
<option value="730">730 Rey\xf0arfj\xf6r\xf0ur</option>
<option value="735">735 Eskifj\xf6r\xf0ur</option>
<option value="740">740 Neskaupsta\xf0ur</option>
<option value="750">750 F\xe1skr\xfa\xf0sfj\xf6r\xf0ur</option>
<option value="755">755 St\xf6\xf0varfj\xf6r\xf0ur</option>
<option value="760">760 Brei\xf0dalsv\xedk</option>
<option value="765">765 Dj\xfapivogur</option>
<option value="780">780 H\xf6fn \xed Hornafir\xf0i</option>
<option value="781">781 H\xf6fn \xed Hornafir\xf0i</option>
<option value="785">785 \xd6r\xe6fi</option>
<option value="800">800 Selfoss</option>
<option value="801">801 Selfoss</option>
<option value="802">802 Selfoss</option>
<option value="810">810 Hverager\xf0i</option>
<option value="815">815 \xdeorl\xe1ksh\xf6fn</option>
<option value="816">816 \xd6lfus</option>
<option value="820">820 Eyrarbakki</option>
<option value="825">825 Stokkseyri</option>
<option value="840">840 Laugarvatn</option>
<option value="845">845 Fl\xfa\xf0ir</option>
<option value="850">850 Hella</option>
<option value="851">851 Hella</option>
<option value="860">860 Hvolsv\xf6llur</option>
<option value="861">861 Hvolsv\xf6llur</option>
<option value="870">870 V\xedk</option>
<option value="871">871 V\xedk</option>
<option value="880">880 Kirkjub\xe6jarklaustur</option>
<option value="900">900 Vestmannaeyjar</option>
<option value="902">902 Vestmannaeyjar</option>
</select>'''
self.assertHTMLEqual(f.render('foo', 'bar'), out)
def test_ISIdNumberField(self):
error_atleast = ['Ensure this value has at least 10 characters (it has 9).']
error_invalid = ['Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.']
error_atmost = ['Ensure this value has at most 11 characters (it has 12).']
error_notvalid = ['The Icelandic identification number is not valid.']
valid = {
'2308803449': '230880-3449',
'230880-3449': '230880-3449',
'230880 3449': '230880-3449',
'2308803440': '230880-3440',
}
invalid = {
'230880343': error_atleast + error_invalid,
'230880343234': error_atmost + error_invalid,
'abcdefghijk': error_invalid,
'2308803439': error_notvalid,
}
self.assertFieldOutput(ISIdNumberField, valid, invalid)
def test_ISPhoneNumberField(self):
error_invalid = ['Enter a valid value.']
error_atleast = ['Ensure this value has at least 7 characters (it has 6).']
error_atmost = ['Ensure this value has at most 8 characters (it has 9).']
valid = {
'1234567': '1234567',
'123 4567': '1234567',
'123-4567': '1234567',
}
invalid = {
'123-456': error_invalid,
'123456': error_atleast + error_invalid,
'123456555': error_atmost + error_invalid,
'abcdefg': error_invalid,
' 1234567 ': error_atmost + error_invalid,
' 12367 ': error_invalid
}
self.assertFieldOutput(ISPhoneNumberField, valid, invalid)
|
M157q/django-localflavor
|
tests/test_is.py
|
Python
|
bsd-3-clause
| 9,213
| 0.000543
|
import tkinter as tk
from tkinter.filedialog import askdirectory
from tkinter.messagebox import showwarning, showerror, showinfo
from tkinter import ttk
import logging
import sys
from threading import Thread
from spider_board.client import Browser
from spider_board.utils import time_job, LOG_FILE, get_logger, humansize
# Create the logging handlers and attach them
logger = get_logger(__name__, LOG_FILE)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
class Gui:
def __init__(self):
logger.info('Instantiating GUI')
self.root = tk.Tk()
self.browser = None
self.make_gui()
def make_gui(self):
logger.info('Building GUI')
self.main_frame = ttk.Frame(self.root)
self.main_frame.pack(expand=True, fill=tk.BOTH, pady=10, padx=10)
# Make the username label and box
ttk.Label(self.main_frame, text='Username:').grid(row=0, column=2)
self.username = tk.StringVar()
self.username_box = ttk.Entry(self.main_frame,
textvariable=self.username)
self.username_box.grid(row=0, column=3, sticky='nsew')
# Make the password label and box
ttk.Label(self.main_frame, text='Password:').grid(row=1, column=2)
self.password = tk.StringVar()
self.password_box = ttk.Entry(self.main_frame,
textvariable=self.password)
self.password_box.grid(row=1, column=3, sticky='nsew')
# Make the savefile label and box
self.savefile_btn = ttk.Button(self.main_frame, text='Browse',
command=self.ask_find_directory)
self.savefile_btn.grid(row=2, column=2)
self.savefile = tk.StringVar()
self.savefile_box = ttk.Entry(self.main_frame,
textvariable=self.savefile)
self.savefile_box.grid(row=2, column=3, sticky='nsew')
# Set up the column weightings
self.main_frame.columnconfigure(3, weight=1)
self.main_frame.columnconfigure(0, weight=5)
self.main_frame.rowconfigure(3, weight=1)
# Make the listbox (and scrollbar) for selecting units
self.unit_box = tk.Listbox(self.main_frame, relief=tk.SUNKEN,
selectmode=tk.EXTENDED)
self.unit_box.grid(row=0, column=0,
rowspan=5, columnspan=2,
sticky='nsew')
scrollbar = tk.Scrollbar(self.main_frame)
scrollbar.config(command=self.unit_box.yview)
self.unit_box.config(yscrollcommand=scrollbar.set)
scrollbar.grid(row=0, column=1, rowspan=5, sticky='nsew')
# Make the "login" button
self.go_button = ttk.Button(self.main_frame, text='Login',
command=self.login)
self.go_button.grid(row=4, column=2, sticky='es')
# Make the "start downloading" button
self.go_button = ttk.Button(self.main_frame, text='Start Downloading',
command=self.start_downloading)
self.go_button.grid(row=4, column=3, sticky='es')
def login(self):
logger.info('Login button pressed')
username = self.username.get()
password = self.password.get()
savefile = self.savefile.get()
# Check all required fields are filled in
if username and password and savefile:
logger.info('Attempting login')
self.browser = Browser(username, password, savefile)
self.bootstrap_browser(self.browser)
# Do the login in a different thread
Thread(target=self.browser.login).start()
else:
showwarning('Ok', 'Please fill in all necessary fields.')
logger.warn("Required fields haven't been filled in")
def start_downloading(self):
logger.info('Download button pressed')
if self.browser and self.browser.is_logged_in:
self.browser.spider_concurrent()
self.browser.download_concurrent()
else:
logger.info('Not logged in')
showerror('Ok', 'Not logged in')
def ask_find_directory(self):
save_location = askdirectory()
self.savefile.set(save_location)
def mainloop(self):
self.root.mainloop()
def quit(self):
self.root.destroy()
def update_units(self):
self.unit_box.delete(0, tk.END)
for unit in self.browser.units:
self.unit_box.insert(tk.END, unit.title)
self.root.after(1000, self.update_units)
def bootstrap_browser(self, browser):
"""
Add in any hooks to the browser so they will be run on certain events.
"""
def on_quit(browser_instance, gui):
"""Close the GUI"""
gui.quit()
def on_login_successful(browser_instance, gui):
"""Fire off an info dialog and get units (in another thread)"""
# Thread(target=browser_instance.get_units).start()
gui.root.after(0, showinfo, 'Ok', 'Login Successful')
def on_login_failed(browser_instance, gui):
"""Fire off an error dialog"""
showerror('Ok', 'Login Unsuccessful')
def on_get_units(browser_instance, gui):
gui.root.after(0, gui.update_units)
hooks = [on_quit, on_login_successful, on_login_failed,
on_get_units]
# Do the actual bootstrapping
for hook in hooks:
            # Bind the current hook as a default argument so each callback keeps its own
            # hook; a plain closure here would make every attribute resolve to the last hook.
            callback = lambda browser_instance, hook=hook: hook(browser_instance, self)
            setattr(browser, hook.__name__, callback)
browser.on_login_failed(self)
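# A minimal launcher sketch for the Gui above (illustrative only; the real package may
# start the application elsewhere): construct Gui() and enter the Tk main loop.
if __name__ == '__main__':
    gui = Gui()
    gui.mainloop()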
|
Michael-F-Bryan/spider_board
|
spider_board/gui.py
|
Python
|
mit
| 5,607
| 0.003389
|
#!/usr/bin/env python
import sys
sys.path.append('/var/www/html/modules/libraries')
import avahi
import dbus
from time import sleep
import mysql.connector
file = open('/var/www/html/config.php', 'r')
for line in file:
if "db_name" in line: MySQL_database = line.split('"')[3]
elif "db_user" in line: MySQL_username = line.split('"')[3]
elif "db_password" in line: MySQL_password = line.split('"')[3]
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
query = ("SELECT Setting,value FROM Settings")
cursor.execute(query)
for (Setting, value) in cursor:
if Setting == "MQTT_ip_address":
MQTT_ip_address = value
cursor.close()
cnx.close()
class ServiceAnnouncer:
def __init__(self, name, service, port, txt):
bus = dbus.SystemBus()
server = dbus.Interface(bus.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
group = dbus.Interface(bus.get_object(avahi.DBUS_NAME, server.EntryGroupNew()),
avahi.DBUS_INTERFACE_ENTRY_GROUP)
self._service_name = name
index = 1
while True:
try:
group.AddService(avahi.IF_UNSPEC, avahi.PROTO_INET, 0, self._service_name, service, '', '', port, avahi.string_array_to_txt_array(txt))
except dbus.DBusException: # name collision -> rename
index += 1
self._service_name = '%s #%s' % (name, str(index))
else:
break
group.Commit()
def get_service_name(self):
return self._service_name
if __name__ == '__main__':
announcer = ServiceAnnouncer(MQTT_ip_address, '_irulez._tcp.', 80,'')
print announcer.get_service_name()
sleep(10000)
|
deklungel/iRulez
|
old/modules/discovery/discovery.py
|
Python
|
mit
| 1,800
| 0.012222
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from tempest.lib import exceptions
from neutronclient.tests.functional import base
class SimpleReadOnlyNeutronClientTest(base.ClientTestBase):
"""This is a first pass at a simple read only python-neutronclient test.
This only exercises client commands that are read only.
This should test commands:
* as a regular user
* as a admin user
* with and without optional parameters
* initially just check return codes, and later test command outputs
"""
def test_admin_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-neutron-exist')
# NOTE(mestery): Commands in order listed in 'neutron help'
# Optional arguments:
def test_neutron_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-not-exist')
def test_neutron_net_list(self):
net_list = self.parser.listing(self.neutron('net-list'))
self.assertTableStruct(net_list, ['id', 'name', 'subnets'])
def test_neutron_ext_list(self):
ext = self.parser.listing(self.neutron('ext-list'))
self.assertTableStruct(ext, ['alias', 'name'])
def test_neutron_dhcp_agent_list_hosting_net(self):
self.neutron('dhcp-agent-list-hosting-net',
params='private')
def test_neutron_agent_list(self):
agents = self.parser.listing(self.neutron('agent-list'))
field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
self.assertTableStruct(agents, field_names)
def test_neutron_floatingip_list(self):
self.neutron('floatingip-list')
def test_neutron_meter_label_list(self):
self.neutron('meter-label-list')
def test_neutron_meter_label_rule_list(self):
self.neutron('meter-label-rule-list')
def _test_neutron_lbaas_command(self, command):
try:
self.neutron(command)
except exceptions.CommandFailed as e:
if '404 Not Found' not in e.stderr:
self.fail('%s: Unexpected failure.' % command)
def test_neutron_lb_healthmonitor_list(self):
self._test_neutron_lbaas_command('lb-healthmonitor-list')
def test_neutron_lb_member_list(self):
self._test_neutron_lbaas_command('lb-member-list')
def test_neutron_lb_pool_list(self):
self._test_neutron_lbaas_command('lb-pool-list')
def test_neutron_lb_vip_list(self):
self._test_neutron_lbaas_command('lb-vip-list')
def test_neutron_net_external_list(self):
net_ext_list = self.parser.listing(self.neutron('net-external-list'))
self.assertTableStruct(net_ext_list, ['id', 'name', 'subnets'])
def test_neutron_port_list(self):
port_list = self.parser.listing(self.neutron('port-list'))
self.assertTableStruct(port_list, ['id', 'name', 'mac_address',
'fixed_ips'])
def test_neutron_quota_list(self):
self.neutron('quota-list')
def test_neutron_router_list(self):
router_list = self.parser.listing(self.neutron('router-list'))
self.assertTableStruct(router_list, ['id', 'name',
'external_gateway_info'])
def test_neutron_security_group_list(self):
security_grp = self.parser.listing(self.neutron('security-group-list'))
self.assertTableStruct(security_grp, ['id', 'name',
'security_group_rules'])
def test_neutron_security_group_rule_list(self):
security_grp = self.parser.listing(self.neutron
('security-group-rule-list'))
self.assertTableStruct(security_grp, ['id', 'security_group',
'direction', 'ethertype',
'port/protocol', 'remote'])
def test_neutron_subnet_list(self):
subnet_list = self.parser.listing(self.neutron('subnet-list'))
self.assertTableStruct(subnet_list, ['id', 'name', 'cidr',
'allocation_pools'])
def test_neutron_firewall_list(self):
firewall_list = self.parser.listing(self.neutron
('firewall-list'))
self.assertTableStruct(firewall_list, ['id', 'name',
'firewall_policy_id'])
def test_neutron_firewall_policy_list(self):
firewall_policy = self.parser.listing(self.neutron
('firewall-policy-list'))
self.assertTableStruct(firewall_policy, ['id', 'name',
'firewall_rules'])
def test_neutron_firewall_rule_list(self):
firewall_rule = self.parser.listing(self.neutron
('firewall-rule-list'))
self.assertTableStruct(firewall_rule, ['id', 'name',
'firewall_policy_id',
'summary', 'enabled'])
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
command_pattern = re.compile('^ {2}([a-z0-9\-\_]+)')
for line in lines[cmds_start:]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
'router-show', 'agent-update', 'help'))
self.assertFalse(wanted_commands - commands)
# Optional arguments:
def test_neutron_version(self):
self.neutron('', flags='--version')
def test_neutron_debug_net_list(self):
self.neutron('net-list', flags='--debug')
def test_neutron_quiet_net_list(self):
self.neutron('net-list', flags='--quiet')
|
eayunstack/python-neutronclient
|
neutronclient/tests/functional/core/test_readonly_neutron.py
|
Python
|
apache-2.0
| 6,814
| 0.000294
|
from OctaHomeCore.OctaFiles.urls.base import *
from OctaHomeTempControl.views import *
class TempControlOctaUrls(OctaUrls):
@classmethod
def getUrls(cls):
return [
url(r'^TempControl/command/(?P<command>\w+)/$', handleTempCommand.as_view(), name='TempControlCommandWithOutDevice'),
url(r'^TempControl/command/(?P<command>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
url(r'^TempControl/command/(?P<command>\w+)/(?P<house>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
url(r'^TempControl/command/(?P<command>\w+)/(?P<house>\w+)/(?P<room>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
url(r'^TempControl/page/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<page>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<house>\w+)/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<house>\w+)/(?P<room>\w+)/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/(?P<room>\w+)/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/(?P<room>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempView.as_view(), name='TempControl'),
]
|
Tomcuzz/OctaHomeAutomation
|
OctaHomeTempControl/OctaFiles/urls.py
|
Python
|
mit
| 1,656
| 0.019324
|
# Copyright (c) 2016 Huawei Technologies India Pvt Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslotest import base
import networking_huawei.drivers.ac.client.restclient as ac_rest
from networking_huawei.drivers.ac.common import config # noqa
test_create_network_req = {'network':
{'routerExternal': False,
'networkType': 'local',
'segmentationId': None,
'adminStateUp': True,
'tenant_id': 'test-tenant',
'name': 'net1',
'physicalNetwork': None,
'serviceName': 'physnet1',
'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
'status': 'ACTIVE',
'shared': False}}
class HuaweiACRestClientTestCase(base.BaseTestCase):
def setUp(self):
cfg.CONF.set_override('username', 'huawei_user', 'huawei_ac_config')
cfg.CONF.set_override('password', 'huawei_pwd', 'huawei_ac_config')
cfg.CONF.set_override('neutron_ip', '127.0.0.1', 'huawei_ac_config')
cfg.CONF.set_override('neutron_name', 'NS_1', 'huawei_ac_config')
super(HuaweiACRestClientTestCase, self).setUp()
self.restc = ac_rest.RestClient
self.host = cfg.CONF.huawei_ac_config.host
self.port = cfg.CONF.huawei_ac_config.port
self.url = '%s%s%s%s' % ("http://", self.host, ":", str(self.port))
def _mock_req_resp(self, status_code):
response = mock.Mock()
response.response = "OK"
response.status_code = status_code
response.errorcode = 0
response.content = jsonutils.dumps(
{'result': "ok", 'errorCode': '0', 'errorMsg': None}, indent=2)
return response
def test_rc_send_timeout(self):
methodname = 'POST'
url = '/controller/dc/esdk/v2.0/test_url'
expected_ret = {'errorCode': None, 'reason': None,
'response': None, 'status': -1}
with mock.patch.object(self.restc, 'process_request',
return_value="Timeout Exceptions"):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url, hex(10), {})
self.assertEqual(expected_ret, ret, "Not expected return")
def test_rc_send_success(self):
methodname = 'POST'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': u'0', 'reason': None,
'response': 'ok', 'status': 204}
with mock.patch.object(self.restc,
'process_request',
return_value=self._mock_req_resp
(requests.codes.no_content)):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_send_del_network(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': None, 'reason': None,
'response': None, 'status': 200}
resp = self._mock_req_resp(requests.codes.ok)
resp.content = ""
with mock.patch.object(self.restc, 'process_request',
return_value=resp):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_send_del_network_resp_valid(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': None, 'reason': None,
'response': None, 'status': 300}
resp = self._mock_req_resp(requests.codes.multiple_choices)
with mock.patch.object(self.restc, 'process_request',
return_value=resp):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_process_request(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
data = {"network": {"routerExternal": False,
"id": "d897e21a-dfd6-4331-a5dd-7524fa421c3e",
"serviceName": "physnet1",
"status": "ACTIVE",
"shared": False,
"adminStateUp": True,
"tenant_id": "test-tenant",
"segmentationId": None,
"physicalNetwork": None,
"networkType": "local",
"name": "net1"}}
resp = self._mock_req_resp(requests.codes.no_content)
kwargs = {'url': url, 'data': data}
with mock.patch('requests.request',
return_value=resp) as mock_method:
ac_rest.RestClient().process_request(methodname, auth,
url, headers,
data)
mock_method.\
assert_called_once_with(
methodname,
headers={'Content-type':
'application/json',
'Accept':
'application/json'},
timeout=float(cfg.CONF.
huawei_ac_config.
request_timeout),
verify=False,
auth=(cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password),
**kwargs)
def test_rc_process_request_timeout_exception(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
data = {"network": {"routerExternal": False,
"id": "d897e21a-dfd6-4331-a5dd-7524fa421c3e",
"serviceName": "physnet1",
"status": "ACTIVE",
"shared": False,
"adminStateUp": True,
"tenant_id": "test-tenant",
"segmentationId": None,
"physicalNetwork": None,
"networkType": "local",
"name": "net1"}}
resp = self._mock_req_resp(requests.codes.no_content)
kwargs = {'url': url, 'data': data}
with mock.patch('requests.request',
return_value=resp) as mock_method:
mock_method.side_effect = requests.exceptions.\
Timeout(mock.Mock(msg="Timeout Exceptions"))
ac_rest.RestClient().\
process_request(methodname, auth, url, headers, data)
mock_method.\
assert_any_call(methodname,
headers={'Content-type':
'application/json',
'Accept':
'application/json'},
timeout=float(cfg.CONF.
huawei_ac_config.
request_timeout),
verify=False,
auth=(cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password),
**kwargs)
def test_rc_process_request_exception(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
data = {"network": {"routerExternal": False,
"id": "d897e21a-dfd6-4331-a5dd-7524fa421c3e",
"serviceName": "physnet1",
"status": "ACTIVE",
"shared": False,
"adminStateUp": True,
"tenant_id": "test-tenant",
"segmentationId": None,
"physicalNetwork": None,
"networkType": "local",
"name": "net1"}}
resp = self._mock_req_resp(requests.codes.no_content)
kwargs = {'url': url, 'data': data}
with mock.patch('requests.request',
return_value=resp) as mock_method:
mock_method.side_effect = Exception(mock.Mock(msg="Timeout "
"Exceptions"))
ac_rest.RestClient().process_request(methodname, auth,
url,
headers, data)
mock_method.\
assert_any_call(methodname,
headers={'Content-type':
'application/json',
'Accept':
'application/json'},
timeout=float(cfg.CONF.
huawei_ac_config.
request_timeout),
verify=False,
auth=(cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password),
**kwargs)
def test_rc_send_http_success(self):
http = {'errorCode': None, 'reason': None,
'response': None, 'status': 300}
ret = ac_rest.RestClient().http_success(http)
self.assertEqual(False, ret,
"Not expected response")
|
libuparayil/networking-huawei
|
networking_huawei/tests/unit/drivers/ac/client/test_restclient.py
|
Python
|
apache-2.0
| 11,844
| 0
|
import unittest
from graph_diff.graph import rnr_graph, lr_node
from graph_diff.graph.graph_with_repetitive_nodes_exceptions import GraphWithRepetitiveNodesKeyError
class GraphWithRepetitiveNodesWithRootTest(unittest.TestCase):
def setUp(self):
self.test_graph = rnr_graph()
def test_add_node(self):
self.assertFalse(lr_node(1, 1) in self.test_graph)
self.test_graph.add_node(lr_node(1, 1))
self.assertTrue(lr_node(1, 1) in self.test_graph)
def test_add_edge(self):
self.assertFalse(lr_node(1, 1) in self.test_graph)
self.assertFalse(lr_node(1, 2) in self.test_graph)
self.test_graph.add_edge(lr_node(1, 1), lr_node(1, 2))
self.assertTrue(lr_node(1, 1) in self.test_graph)
self.assertTrue(lr_node(1, 2) in self.test_graph)
def test_add_edge_exp(self):
self.assertFalse(lr_node(1, 1) in self.test_graph)
self.assertFalse(lr_node(1, 2) in self.test_graph)
self.assertRaises(GraphWithRepetitiveNodesKeyError,
self.test_graph.add_edge_exp,
lr_node(1, 1),
lr_node(1, 2))
if __name__ == '__main__':
unittest.main()
|
alexander-bzikadze/graph_diff
|
tests/graph/test_graph_with_repetitive_nodes_with_root.py
|
Python
|
apache-2.0
| 1,211
| 0.000826
|
from setuptools import setup
__version__ = "0.5.0"
# Get the long description by reading the README
try:
readme_content = open("README.md").read()
except:
readme_content = ""
# Create the actual setup method
setup(name='pypred',
version=__version__,
description='A Python library for simple evaluation of natural language predicates',
long_description=readme_content,
author='Armon Dadgar',
author_email='armon@kiip.me',
maintainer='Armon Dadgar',
maintainer_email='armon@kiip.me',
url="https://github.com/armon/pypred/",
license="MIT License",
keywords=["python", "predicate", "natural language"],
packages=['pypred'],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries"
],
install_requires=["ply>=3.4"]
)
|
armon/pypred
|
setup.py
|
Python
|
bsd-3-clause
| 1,228
| 0.002443
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for writing summary data, for use in analysis and visualization.
See the [Summaries and
TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import json_format as _json_format
# exports Summary, SummaryDescription, Event, TaggedRunMetadata, SessionLog
# pylint: disable=unused-import
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.framework.summary_pb2 import SummaryDescription
from tensorflow.core.framework.summary_pb2 import SummaryMetadata as _SummaryMetadata
# pylint: enable=unused-import
from tensorflow.core.util.event_pb2 import Event
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.core.util.event_pb2 import TaggedRunMetadata
# pylint: enable=unused-import
from tensorflow.python.eager import context as _context
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import gen_logging_ops as _gen_logging_ops
from tensorflow.python.ops import gen_summary_ops as _gen_summary_ops # pylint: disable=unused-import
from tensorflow.python.ops import summary_op_util as _summary_op_util
# exports FileWriter, FileWriterCache
# pylint: disable=unused-import
from tensorflow.python.summary.writer.writer import FileWriter
from tensorflow.python.summary.writer.writer_cache import FileWriterCache
# pylint: enable=unused-import
from tensorflow.python.util import compat as _compat
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['summary.scalar'])
def scalar(name, tensor, collections=None, family=None):
"""Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric Tensor containing a single value.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.scalar_summary(tags=tag, values=tensor, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.image'])
def image(name, tensor, max_outputs=3, collections=None, family=None):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.image_summary(
tag=tag, tensor=tensor, max_images=max_outputs, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.histogram'])
def histogram(name, values, collections=None, family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
Adding a histogram summary makes it possible to visualize your data's
distribution in TensorBoard. You can see a detailed explanation of the
TensorBoard histogram dashboard
[here](https://www.tensorflow.org/get_started/tensorboard_histograms).
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[values],
default_name='HistogramSummary') as (tag, scope):
val = _gen_logging_ops.histogram_summary(
tag=tag, values=values, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.audio'])
def audio(name, tensor, sample_rate, max_outputs=3, collections=None,
family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/audio/0', '*name*/audio/1', etc
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family=family, values=[tensor]) as (tag, scope):
sample_rate = _ops.convert_to_tensor(
sample_rate, dtype=_dtypes.float32, name='sample_rate')
val = _gen_logging_ops.audio_summary_v2(
tag=tag, tensor=tensor, max_outputs=max_outputs,
sample_rate=sample_rate, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.text'])
def text(name, tensor, collections=None):
"""Summarizes textual data.
Text data summarized via this plugin will be visible in the Text Dashboard
in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
in the strings, and will automatically organize 1d and 2d tensors into tables.
If a tensor with more than 2 dimensions is provided, a 2d subarray will be
displayed along with a warning message. (Note that this behavior is not
intrinsic to the text summary api, but rather to the default TensorBoard text
plugin.)
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: a string-type Tensor to summarize.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A TensorSummary op that is configured so that TensorBoard will recognize
that it contains textual data. The TensorSummary is a scalar `Tensor` of
type `string` which contains `Summary` protobufs.
Raises:
ValueError: If tensor has the wrong type.
"""
if tensor.dtype != _dtypes.string:
raise ValueError('Expected tensor %s to have dtype string, got %s' %
(tensor.name, tensor.dtype))
summary_metadata = _SummaryMetadata(
plugin_data=_SummaryMetadata.PluginData(plugin_name='text'))
t_summary = tensor_summary(
name=name,
tensor=tensor,
summary_metadata=summary_metadata,
collections=collections)
return t_summary
@tf_export(v1=['summary.tensor_summary'])
def tensor_summary(name,
tensor,
summary_description=None,
collections=None,
summary_metadata=None,
family=None,
display_name=None):
"""Outputs a `Summary` protocol buffer with a serialized tensor.proto.
Args:
name: A name for the generated node. If display_name is not set, it will
also serve as the tag name in TensorBoard. (In that case, the tag
name will inherit tf name scopes.)
tensor: A tensor of any type and shape to serialize.
summary_description: A long description of the summary sequence. Markdown
is supported.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
summary_metadata: Optional SummaryMetadata proto (which describes which
plugins may use the summary value).
family: Optional; if provided, used as the prefix of the summary tag,
which controls the name used for display on TensorBoard when
display_name is not set.
display_name: A string used to name this data in TensorBoard. If this is
not set, then the node name will be used instead.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if summary_metadata is None:
summary_metadata = _SummaryMetadata()
if summary_description is not None:
summary_metadata.summary_description = summary_description
if display_name is not None:
summary_metadata.display_name = display_name
serialized_summary_metadata = summary_metadata.SerializeToString()
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.tensor_summary_v2(
tensor=tensor,
tag=tag,
name=scope,
serialized_summary_metadata=serialized_summary_metadata)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.merge'])
def merge(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager mode enabled.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
# pylint: enable=line-too-long
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
if _summary_op_util.skip_summary():
return _constant_op.constant('')
name = _summary_op_util.clean_tag(name)
with _ops.name_scope(name, 'Merge', inputs):
val = _gen_logging_ops.merge_summary(inputs=inputs, name=name)
_summary_op_util.collect(val, collections, [])
return val
@tf_export(v1=['summary.merge_all'])
def merge_all(key=_ops.GraphKeys.SUMMARIES, scope=None, name=None):
"""Merges all summaries collected in the default graph.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
scope: Optional scope used to filter the summary ops, using `re.match`
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
summary_ops = _ops.get_collection(key, scope=scope)
if not summary_ops:
return None
else:
return merge(summary_ops, name=name)
@tf_export(v1=['summary.get_summary_description'])
def get_summary_description(node_def):
"""Given a TensorSummary node_def, retrieve its SummaryDescription.
When a Summary op is instantiated, a SummaryDescription of associated
metadata is stored in its NodeDef. This method retrieves the description.
Args:
node_def: the node_def_pb2.NodeDef of a TensorSummary op
Returns:
a summary_pb2.SummaryDescription
Raises:
ValueError: if the node is not a summary op.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
if node_def.op != 'TensorSummary':
raise ValueError("Can't get_summary_description on %s" % node_def.op)
description_str = _compat.as_str_any(node_def.attr['description'].s)
summary_description = SummaryDescription()
_json_format.Parse(description_str, summary_description)
return summary_description
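# A minimal, hedged usage sketch of the graph-mode API documented above (illustrative
# only; it assumes a TensorFlow 1.x graph-mode session, and the tensor and logdir names
# are made up): scalar() records a value, merge_all() fuses the collected summary ops,
# and the re-exported FileWriter serializes the result for TensorBoard.
def _example_scalar_summary_usage(logdir='/tmp/example_logdir'):
    import tensorflow as tf
    loss = tf.placeholder(tf.float32, shape=[], name='loss')
    tf.summary.scalar('loss', loss)              # scalar() defined above
    merged = tf.summary.merge_all()              # merge_all() defined above
    writer = tf.summary.FileWriter(logdir)       # FileWriter re-exported above
    with tf.Session() as sess:
        summary_proto = sess.run(merged, feed_dict={loss: 0.5})
        writer.add_summary(summary_proto, global_step=0)
    writer.close()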
|
jbedorf/tensorflow
|
tensorflow/python/summary/summary.py
|
Python
|
apache-2.0
| 17,400
| 0.003333
|
from django import forms
from django.forms.widgets import *
from django.utils.safestring import mark_safe
from os.path import split  # needed by AdminFileWidget.render below
from madrona.analysistools.widgets import SliderWidget, DualSliderWidget
class AdminFileWidget(forms.FileInput):
"""
A FileField Widget that shows its current value if it has one.
"""
def __init__(self, attrs={}):
super(AdminFileWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
output = []
if value and hasattr(value, "name"):
filename = split(value.name)[-1]
output.append('Current File: <a href="%s" target="_blank">%s</a> : <input style="top:0px;margin-bottom:0px" type="checkbox" name="clear_%s" /> Remove </p>' % (value._get_url(), filename, name))
output.append('<p> Change:')
output.append(super(AdminFileWidget, self).render(name, value, attrs))
#output.append("</p>")
return mark_safe(u''.join(output))
class SliderWidgetWithTooltip(SliderWidget):
def __init__(self, min, max, step, id):
super(SliderWidgetWithTooltip, self).__init__(min, max, step)
self.id = id
def render(self, *args, **kwargs):
output = super(SliderWidgetWithTooltip, self).render(*args,**kwargs)
img_id = self.id
span_id = "%s_content" %self.id
#grabbing flatblock outright as including the flatblock template tag in the output html resulted in a literal output of the template tag
from flatblocks.models import FlatBlock
try:
flatblock = str(FlatBlock.objects.get(slug=self.id).content)
except:
flatblock = ""
output = output.replace('\n', ' <img src="/media/marco/img/info.png" id="%s" class="info" />\n' %img_id, 1)
output = output.replace('\n', ' <span id="%s" style="display: none;">%s</span>\n' %(span_id, flatblock), 1)
return mark_safe(output)
class DualSliderWidgetWithTooltip(DualSliderWidget):
def __init__(self, param1, param2, min, max, step, id):
super(DualSliderWidgetWithTooltip, self).__init__(param1, param2, min, max, step)
self.id = id
def render(self, *args, **kwargs):
output = super(DualSliderWidgetWithTooltip, self).render(*args,**kwargs)
output = output.replace('\n', '<img src="/media/marco/img/info.png" id="%s" class="info" />\n' %self.id, 1)
return mark_safe(output)
class CheckboxSelectMultipleWithObjTooltip(forms.CheckboxSelectMultiple):
def __init__(self, queryset=None, attrs=None):
super(CheckboxSelectMultipleWithObjTooltip, self).__init__(attrs)
self.queryset = queryset
self.attrs = attrs
def render(self, *args, **kwargs):
output = super(CheckboxSelectMultipleWithObjTooltip, self).render(*args,**kwargs)
for obj in self.queryset:
output = output.replace(str(obj), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(obj), obj.objective.short_name) )
#print output
return mark_safe(output)
class CheckboxSelectMultipleWithTooltip(forms.CheckboxSelectMultiple):
def __init__(self, queryset=None, substrate=None, attrs=None):
super(CheckboxSelectMultipleWithTooltip, self).__init__(attrs)
self.queryset = queryset
self.substrate = substrate
self.attrs = attrs
def render(self, *args, **kwargs):
output = super(CheckboxSelectMultipleWithTooltip, self).render(*args,**kwargs)
for param in self.queryset:
tidal_substrate = False
try:
if param.parameter.short_name == 'substrate' and self.substrate is None and 'tidal' in self.attrs['class']:
tidal_substrate = True
except:
pass
if param.parameter.short_name == 'substrate' and self.substrate is not None:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(param), self.substrate) )
elif tidal_substrate:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_tidal_substrate" class="info" />' %(str(param)) )
else:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(param), param.parameter.short_name) )
#print output
return mark_safe(output)
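# A hedged usage sketch (illustrative only; the field names, slider ranges and flatblock
# slugs are assumptions, and the constructor signatures are taken from the __init__
# methods above): these widgets attach to ordinary Django form fields.
class ExampleScenarioForm(forms.Form):
    depth = forms.FloatField(
        widget=SliderWidgetWithTooltip(0, 100, 1, 'depth_slider'))
    depth_range = forms.CharField(
        widget=DualSliderWidgetWithTooltip('min_depth', 'max_depth', 0, 100, 1,
                                           'depth_range_slider'))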
|
Ecotrust/PEW-EFH
|
mp/scenarios/widgets.py
|
Python
|
apache-2.0
| 4,479
| 0.014066
|
from sympy import S, Integral, sin, cos, pi, sqrt, symbols
from sympy.physics.mechanics import (Dyadic, Particle, Point, ReferenceFrame,
RigidBody, Vector)
from sympy.physics.mechanics import (angular_momentum, dynamicsymbols,
inertia, inertia_of_point_mass,
kinetic_energy, linear_momentum, \
outer, potential_energy)
from sympy.physics.mechanics.functions import _mat_inv_mul
from sympy.utilities.pytest import raises
Vector.simp = True
q1, q2, q3, q4, q5 = symbols('q1 q2 q3 q4 q5')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
def test_inertia():
N = ReferenceFrame('N')
ixx, iyy, izz = symbols('ixx iyy izz')
ixy, iyz, izx = symbols('ixy iyz izx')
assert inertia(N, ixx, iyy, izz) == (ixx * (N.x | N.x) + iyy *
(N.y | N.y) + izz * (N.z | N.z))
assert inertia(N, 0, 0, 0) == 0 * (N.x | N.x)
assert inertia(N, ixx, iyy, izz, ixy, iyz, izx) == (ixx * (N.x | N.x) +
ixy * (N.x | N.y) + izx * (N.x | N.z) + ixy * (N.y | N.x) + iyy *
(N.y | N.y) + iyz * (N.y | N.z) + izx * (N.z | N.x) + iyz * (N.z |
N.y) + izz * (N.z | N.z))
def test_inertia_of_point_mass():
r, s, t, m = symbols('r s t m')
N = ReferenceFrame('N')
px = r * N.x
I = inertia_of_point_mass(m, px, N)
assert I == m * r**2 * (N.y | N.y) + m * r**2 * (N.z | N.z)
py = s * N.y
I = inertia_of_point_mass(m, py, N)
assert I == m * s**2 * (N.x | N.x) + m * s**2 * (N.z | N.z)
pz = t * N.z
I = inertia_of_point_mass(m, pz, N)
assert I == m * t**2 * (N.x | N.x) + m * t**2 * (N.y | N.y)
p = px + py + pz
I = inertia_of_point_mass(m, p, N)
assert I == (m * (s**2 + t**2) * (N.x | N.x) -
m * r * s * (N.x | N.y) -
m * r * t * (N.x | N.z) -
m * r * s * (N.y | N.x) +
m * (r**2 + t**2) * (N.y | N.y) -
m * s * t * (N.y | N.z) -
m * r * t * (N.z | N.x) -
m * s * t * (N.z | N.y) +
m * (r**2 + s**2) * (N.z | N.z))
def test_linear_momentum():
N = ReferenceFrame('N')
Ac = Point('Ac')
Ac.set_vel(N, 25 * N.y)
I = outer(N.x, N.x)
A = RigidBody('A', Ac, N, 20, (I, Ac))
P = Point('P')
Pa = Particle('Pa', P, 1)
Pa.point.set_vel(N, 10 * N.x)
assert linear_momentum(N, A, Pa) == 10 * N.x + 500 * N.y
def test_angular_momentum_and_linear_momentum():
m, M, l1 = symbols('m M l1')
q1d = dynamicsymbols('q1d')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, q1d * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
assert linear_momentum(
N, A, Pa) == 2 * m * q1d* l1 * N.y + M * l1 * q1d * N.y
assert angular_momentum(
O, N, A, Pa) == 4 * m * q1d * l1**2 * N.z + q1d * N.z
def test_kinetic_energy():
m, M, l1 = symbols('m M l1')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
assert 0 == kinetic_energy(N, Pa, A) - (M*l1**2*omega**2/2
+ 2*l1**2*m*omega**2 + omega**2/2)
def test_potential_energy():
m, M, l1, g, h, H = symbols('m M l1 g h H')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
Pa.set_potential_energy(m * g * h)
A.set_potential_energy(M * g * H)
assert potential_energy(A, Pa) == m * g * h + M * g * H
def test_mat_inv_mul():
# Uses SymPy generated primes as matrix entries, so each entry in
# each matrix should be symbolic and unique, allowing proper comparison.
# Checks _mat_inv_mul against Matrix.inv / Matrix.__mul__.
from sympy import Matrix, prime
# going to form 3 matrices
# 1 n x n
# different n x n
# 1 n x 2n
n = 3
m1 = Matrix(n, n, lambda i, j: prime(i * n + j + 2))
m2 = Matrix(n, n, lambda i, j: prime(i * n + j + 5))
m3 = Matrix(n, n, lambda i, j: prime(i + j * n + 2))
assert _mat_inv_mul(m1, m2) == m1.inv() * m2
assert _mat_inv_mul(m1, m3) == m1.inv() * m3
|
wdv4758h/ZipPy
|
edu.uci.python.benchmark/src/benchmarks/sympy/sympy/physics/mechanics/tests/test_functions.py
|
Python
|
bsd-3-clause
| 5,068
| 0.004144
|
'''Find valid tags and usernames.
The file will contain things like:
tag:12345:romance
'''
import gzip
import re
import requests
import string
import sys
import time
import random
DEFAULT_HEADERS = {'User-Agent': 'ArchiveTeam'}
class FetchError(Exception):
'''Custom error class when fetching does not meet our expectation.'''
def main():
# Take the program arguments given to this script
# Normal programs use 'argparse' but this keeps things simple
start_num = int(sys.argv[1])
end_num = int(sys.argv[2])
output_filename = sys.argv[3] # this should be something like myfile.txt.gz
assert start_num <= end_num
print('Starting', start_num, end_num)
gzip_file = gzip.GzipFile(output_filename, 'wb')
for shortcode in check_range(start_num, end_num):
# Write the valid result one per line to the file
line = '{0}\n'.format(shortcode)
gzip_file.write(line.encode('ascii'))
gzip_file.close()
print('Done')
def check_range(start_num, end_num):
'''Check if page exists.
Each line is like tag:12345:romance
'''
for num in range(start_num, end_num + 1):
shortcode = num
url = 'http://www.panoramio.com/user/{0}'.format(shortcode)
counter = 0
while True:
# Try 20 times before giving up
if counter > 20:
# This will stop the script with an error
raise Exception('Giving up!')
try:
text = fetch(url)
except FetchError:
# The server may be overloaded so wait a bit
print('Sleeping... If you see this')
time.sleep(10)
else:
if text:
                    for user in extract_user(text):
                        for tag in extract_tags(text):
                            yield 'tag:{0}:{1}'.format(user, tag)
break # stop the while loop
counter += 1
def fetch(url):
'''Fetch the URL and check if it returns OK.
Returns True, returns the response text. Otherwise, returns None
'''
print('Fetch', url)
response = requests.get(url, headers=DEFAULT_HEADERS)
# response doesn't have a reason attribute all the time??
print('Got', response.status_code, getattr(response, 'reason'))
if response.status_code == 200:
# The item exists
if not response.text:
# If HTML is empty maybe server broke
raise FetchError()
return response.text
elif response.status_code == 404:
# Does not exist
return
else:
# Problem
raise FetchError()
def extract_user(text):
    '''Return a list of user ids from the text.'''
# Search for <a href="/user/1707816/tags/Bell%27Italia">Bell'Italia</a>
return re.findall(r'"/user/([^/]+)/tags/', text)
def extract_tags(text):
'''Return a list of tags from the text.'''
# Search for <a href="/user/1707816/tags/Bell%27Italia">Bell'Italia</a>
return re.findall(r'"/user/[0-9]+/tags/([^"]+)"', text)
if __name__ == '__main__':
main()
|
ArchiveTeam/panoramio-discovery
|
discover.py
|
Python
|
unlicense
| 3,093
| 0.00097
|
"""
Router.py uses bot_packages in this file to setup command and sensor value routing to the correct bot_role.
"""
settings= {
"bot_name":"rp4.solalla.ardyh",
"bot_roles":"bot",
"bot_packages":[],
"subscriptions":[],
}
|
wilblack/lilybot
|
rpi_client/bot_roles/local_settings_generic.py
|
Python
|
gpl-2.0
| 242
| 0.028926
|
from astropy import units as u
K_kepler = 0.01720209895 # ua^(3/2) m_{sun} d^(−1)
K = 0.01720209908 * u.au ** (3 / 2) / u.d # ua^(3/2) d^(−1)
UA = 149597870700 * u.m # m
GM1 = 1.32712442099E20 * u.m ** 3 / u.s ** 2 # m^(3) s^(−2)
# m1/m2
Mercury = 6023600
Venus = 408523.719
Earth_Moon = 328900.561400
Mars = 3098703.59
Jupiter = 1047.348644
Saturn = 3497.9018
Uranus = 22902.98
Neptune = 19412.26
Pluto = 136566000
Eris = 119100000
Ceres = 2119000000
Palas = 9700000000
Vesta = 7400000000
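# A hedged worked example (illustrative only; the helper names are made up): the ratios
# above are m_sun / m_body, so a body's GM follows from GM1 by division, and Kepler's
# constant K gives an approximate heliocentric period, neglecting the planet's mass.
import math
def gm_of(mass_ratio):
    """GM of a body given its m_sun/m_body ratio, e.g. gm_of(Jupiter)."""
    return GM1 / mass_ratio
def approximate_period(a):
    """Orbital period T ~ 2*pi*a**(3/2) / K for a semi-major axis with astropy units."""
    return 2 * math.pi * a.to(u.au) ** (3 / 2) / K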
|
Camiloasc1/AstronomyUNAL
|
CelestialMechanics/kepler/constants.py
|
Python
|
mit
| 503
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Piotrek Wasilewski <wasilewski.piotrek@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from core import Chart, ChartColumn
CHART_TOOLS_PACKAGES = ['corechart', 'gauge', 'geochart', 'table', \
'treemap', 'annotatedtimeline']
class InvalidChartsPackage(Exception):
pass
class DatatableColumn(ChartColumn):
"""
"""
type_name = ''
def __init__(self, name, data):
self.name = name
self._data = data
def format(self, value):
return value
def get_data(self):
return [self.format(value) for value in self._data]
data = property(get_data)
class NumberColumn(DatatableColumn):
type_name = 'number'
class StringColumn(DatatableColumn):
type_name = 'string'
def format(self, value):
return "'%s'" % value
class DateColumn(DatatableColumn):
type_name = 'date'
def format(self, value):
return 'new Date(%i, %i, %i)' % \
(value.year, value.month, value.day)
class DatetimeColumn(DatatableColumn):
type_name = 'datetime'
def format(self, value):
return 'new Date(%i, %i, %i, %i, %i, %i)' % \
(value.year, value.month, value.day,
value.hour, value.minute, value.second)
class ChartToolsChart(Chart):
"""
"""
chart_type = ''
def add_column(self, name, data, column_class):
col = column_class(name, data)
self.columns.append(col)
return col
def num_rows(self):
if self.columns:
# we assume that all columns have the same length
return len(self.columns[0])
return 0
class LineChart(ChartToolsChart):
chart_type = 'LineChart'
class ColumnChart(ChartToolsChart):
chart_type = 'ColumnChart'
class ScatterChart(ChartToolsChart):
chart_type = 'ScatterChart'
class AnnotatedTimeLine(ChartToolsChart):
chart_type = 'AnnotatedTimeLine'
class PieChart(ChartToolsChart):
chart_type = 'PieChart'
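# A hedged usage sketch (illustrative only; it touches only the column classes defined
# in this module): each column formats its raw values into the string form expected by
# the Google Chart Tools javascript API, exposed through the `data` property.
def _example_column_formatting():
    from datetime import date
    when = DateColumn('when', [date(2011, 1, 5), date(2011, 2, 5)])
    label = StringColumn('label', ['first', 'second'])
    assert when.data == ['new Date(2011, 1, 5)', 'new Date(2011, 2, 5)']
    assert label.data == ["'first'", "'second'"]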
|
umitproject/network-admin
|
netadmin/utils/charts/charttools.py
|
Python
|
agpl-3.0
| 2,781
| 0.010068
|
"""
Get the stem of a word, given a declined form and its gender.
TODO: Check this logic with von Soden's Grundriss der akkadischen Grammatik.
TODO: Deal with j/y issue.
"""
__author__ = ['M. Willis Monroe <willismonroe@gmail.com>']
__license__ = 'MIT License. See LICENSE.'
ENDINGS = {
'm': {
'singular': {
'nominative': 'um',
'accusative': 'am',
'genitive': 'im'
},
'dual': {
'nominative': 'ān',
'oblique': 'īn'
},
'plural': {
'nominative': 'ū',
'oblique': 'ī'
}
},
'f': {
'singular': {
'nominative': 'tum',
'accusative': 'tam',
'genitive': 'tim'
},
'dual': {
'nominative': 'tān',
'oblique': 'tīn'
},
'plural': {
'nominative': ['ātum', 'ētum', 'ītum'],
            'oblique': ['ātim', 'ētim', 'ītim']
}
}
}
class Stemmer(object):
"""Stem Akkadian words with a simple algorithm based on Huehnergard"""
def __init__(self):
self.endings = ENDINGS
def get_stem(self, noun, gender, mimation=True):
"""Return the stem of a noun, given its gender"""
stem = ''
if mimation and noun[-1:] == 'm':
# noun = noun[:-1]
pass
# Take off ending
if gender == 'm':
if noun[-2:] in list(self.endings['m']['singular'].values()) + \
list(self.endings['m']['dual'].values()):
stem = noun[:-2]
elif noun[-1] in list(self.endings['m']['plural'].values()):
stem = noun[:-1]
else:
print("Unknown masculine noun: {}".format(noun))
elif gender == 'f':
if noun[-4:] in self.endings['f']['plural']['nominative'] + \
self.endings['f']['plural']['oblique']:
stem = noun[:-4] + 't'
elif noun[-3:] in list(self.endings['f']['singular'].values()) + \
list(self.endings['f']['dual'].values()):
stem = noun[:-3] + 't'
elif noun[-2:] in list(self.endings['m']['singular'].values()) + \
list(self.endings['m']['dual'].values()):
stem = noun[:-2]
else:
print("Unknown feminine noun: {}".format(noun))
else:
print("Unknown noun: {}".format(noun))
return stem
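# A hedged usage sketch (illustrative only; the example nouns are standard dictionary
# forms): get_stem() strips the case ending according to the tables above, keeping the
# feminine -t- where present.
if __name__ == '__main__':
    stemmer = Stemmer()
    assert stemmer.get_stem('šarrum', 'm') == 'šarr'      # nominative singular, -um removed
    assert stemmer.get_stem('šarratum', 'f') == 'šarrat'  # -tum removed, feminine -t kept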
|
LBenzahia/cltk
|
cltk/stem/akkadian/stem.py
|
Python
|
mit
| 2,502
| 0.000402
|
from chill import *
source('include.c')
destination('includemodified.c')
procedure('main')
loop(0)
original()
print_code()
|
CtopCsUtahEdu/chill-dev
|
examples/chill/testcases/include.script.py
|
Python
|
gpl-3.0
| 129
| 0.007752
|
#!/usr/bin/python
from sys import argv
from modules.helpers.wpdetector import WordpressDetector
from modules.net.scan import is_good_response
from modules.const import ERR, NO, OK, INFO
def main ():
if len (argv) > 1:
print INFO + 'Checking site...'
if not is_good_response (argv [1]):
print ERR + 'Site is unavailable! :('
exit (-1)
print INFO + 'Detecting wordpress...'
wpd = WordpressDetector (argv [1])
if wpd.detect_by_pages ():
print OK + 'Wordpress Detected!'
if raw_input ('Try to detect Wordpress version? (y/n): ') == 'y':
print INFO + 'Detecting Wordpress version...'
dec = wpd.detect_version ()
if dec is not None:
print OK + 'Wordpress Version Detected!' + dec
else:
print NO + 'Wordpress version getting failed!'
exit (0)
else:
print NO + 'This is not Wordpress! :('
else:
print ERR + 'Example: ./detector.py http://blabla.com'
if __name__ == '__main__':
main ()
|
doctorrabb/badtheme
|
detector.py
|
Python
|
gpl-3.0
| 949
| 0.036881
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from rigour.errors import ValidationFailed
from rigour.types import *
from rigour.constraints import length_between
import rigour
import pytest
def test_secrecy_declared_before():
t = String().secret().constrain(length_between(4,6))
with pytest.raises(ValidationFailed) as excinfo:
t.check("xxx")
message = str(excinfo)
assert "xxx" not in message
def test_secrecy_declared_after():
t = String().constrain(length_between(4,6)).secret()
with pytest.raises(ValidationFailed) as excinfo:
t.check("xxx")
message = str(excinfo)
assert "xxx" not in message
|
steinarvk/rigour
|
rigour/tests/test_secrecy.py
|
Python
|
apache-2.0
| 1,198
| 0.012521
|
import pygame
import intro
import game
class Intro2(intro.Intro):
def load_image(self):
self.fondo = pygame.image.load('ima/intro2.png').convert()
def go_to_next(self):
new_scene = game.Game(self.world)
self.world.change_scene(new_scene)
|
hectorsanchez/acheckersgame
|
intro2.py
|
Python
|
gpl-2.0
| 274
| 0.007299
|
#!/usr/bin/env python
# PyQt tutorial 3
import sys
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
window.resize(200, 120)
quit = QtGui.QPushButton("Quit", window)
quit.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold))
quit.setGeometry(10, 40, 180, 40)
quit.clicked.connect(app.quit)
window.show()
sys.exit(app.exec_())
|
jacksonwilliams/arsenalsuite
|
cpp/lib/PyQt4/examples/tutorial/t3.py
|
Python
|
gpl-2.0
| 367
| 0
|
class GameStateInterface(object):
def __init__(self):
self._team_ids_to_names = None
self._service_ids_to_names = None
def _team_id_to_name_map(self):
raise NotImplementedError
def _service_id_to_name_map(self):
raise NotImplementedError
def _scored_events_for_tick(self, tick):
raise NotImplementedError
@property
def team_id_to_name_map(self):
if self._team_ids_to_names is None:
self._team_ids_to_names = self._team_id_to_name_map()
return self._team_ids_to_names
@property
def service_id_to_name_map(self):
if self._service_ids_to_names is None:
self._service_ids_to_names = self._service_id_to_name_map()
return self._service_ids_to_names
def scored_events_for_tick(self, tick):
# TODO: maybe cache here? or do we cache in the database side?
return self._scored_events_for_tick(tick)
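

# Illustrative sketch only (not part of the framework): a minimal in-memory
# subclass showing how the lazy-caching properties above expect to be fed.
# The team/service names and event payloads are made up.
class _ExampleGameState(GameStateInterface):
    def __init__(self, teams, services, events_by_tick):
        super(_ExampleGameState, self).__init__()
        self._teams = teams                    # e.g. {1: 'team-one'}
        self._services = services              # e.g. {1: 'web-service'}
        self._events_by_tick = events_by_tick  # e.g. {0: [{'team_id': 1}]}

    def _team_id_to_name_map(self):
        # Called at most once; the caching property stores the result.
        return dict(self._teams)

    def _service_id_to_name_map(self):
        return dict(self._services)

    def _scored_events_for_tick(self, tick):
        return self._events_by_tick.get(tick, [])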
|
ucsb-seclab/ictf-framework
|
scoring_ictf/scoring_ictf/game_state_interface.py
|
Python
|
gpl-2.0
| 943
| 0
|
"""Test cases that are in common among wemo platform modules.
This is not a test module. These test methods are used by the platform test modules.
"""
import asyncio
import threading
from unittest.mock import patch
from pywemo.ouimeaux_device.api.service import ActionException
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_UNAVAILABLE
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
def _perform_registry_callback(hass, pywemo_registry, pywemo_device):
"""Return a callable method to trigger a state callback from the device."""
@callback
def async_callback():
# Cause a state update callback to be triggered by the device.
pywemo_registry.callbacks[pywemo_device.name](pywemo_device, "", "")
return hass.async_block_till_done()
return async_callback
def _perform_async_update(hass, wemo_entity):
"""Return a callable method to cause hass to update the state of the entity."""
@callback
def async_callback():
return hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
return async_callback
async def _async_multiple_call_helper(
hass,
pywemo_registry,
wemo_entity,
pywemo_device,
call1,
call2,
update_polling_method=None,
):
"""Create two calls (call1 & call2) in parallel; verify only one polls the device.
The platform entity should only perform one update poll on the device at a time.
Any parallel updates that happen at the same time should be ignored. This is
verified by blocking in the update polling method. The polling method should
only be called once as a result of calling call1 & call2 simultaneously.
"""
# get_state is called outside the event loop. Use non-async Python Event.
event = threading.Event()
def get_update(force_update=True):
event.wait()
update_polling_method = update_polling_method or pywemo_device.get_state
update_polling_method.side_effect = get_update
# One of these two calls will block on `event`. The other will return right
# away because the `_update_lock` is held.
_, pending = await asyncio.wait(
[call1(), call2()], return_when=asyncio.FIRST_COMPLETED
)
# Allow the blocked call to return.
event.set()
if pending:
await asyncio.wait(pending)
# Make sure the state update only happened once.
update_polling_method.assert_called_once()
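
# The behavior asserted above amounts to a "skip if an update is already in
# flight" pattern. A rough standalone sketch of that idea (an assumption about
# the entity's internals, not the actual Home Assistant implementation):
#
#   _update_lock = asyncio.Lock()
#
#   async def locked_update(poll_device):
#       if _update_lock.locked():
#           return  # another poll is running; drop this request
#       async with _update_lock:
#           await hass.async_add_executor_job(poll_device)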
async def test_async_update_locked_callback_and_update(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that a callback and a state update request can't both happen at the same time.
When a state update is received via a callback from the device at the same time
as hass is calling `async_update`, verify that only one of the updates proceeds.
"""
await async_setup_component(hass, HA_DOMAIN, {})
callback = _perform_registry_callback(hass, pywemo_registry, pywemo_device)
update = _perform_async_update(hass, wemo_entity)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, callback, update, **kwargs
)
async def test_async_update_locked_multiple_updates(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that two hass async_update state updates do not proceed at the same time."""
await async_setup_component(hass, HA_DOMAIN, {})
update = _perform_async_update(hass, wemo_entity)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, update, update, **kwargs
)
async def test_async_update_locked_multiple_callbacks(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that two device callback state updates do not proceed at the same time."""
await async_setup_component(hass, HA_DOMAIN, {})
callback = _perform_registry_callback(hass, pywemo_registry, pywemo_device)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, callback, callback, **kwargs
)
async def test_async_locked_update_with_exception(
hass, wemo_entity, pywemo_device, update_polling_method=None
):
"""Test that the entity becomes unavailable when communication is lost."""
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
await async_setup_component(hass, HA_DOMAIN, {})
update_polling_method = update_polling_method or pywemo_device.get_state
update_polling_method.side_effect = ActionException
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_UNAVAILABLE
async def test_async_update_with_timeout_and_recovery(hass, wemo_entity, pywemo_device):
"""Test that the entity becomes unavailable after a timeout, and that it recovers."""
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
await async_setup_component(hass, HA_DOMAIN, {})
with patch("async_timeout.timeout", side_effect=asyncio.TimeoutError):
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_UNAVAILABLE
# Check that the entity recovers and is available after the update succeeds.
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
|
turbokongen/home-assistant
|
tests/components/wemo/entity_test_helpers.py
|
Python
|
apache-2.0
| 5,991
| 0.002838
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14 14:10:41 2016
@author: sigurdja
"""
from setuptools import setup, find_packages
setup(
name="psse_models",
version="0.1",
packages=find_packages(),
)
|
Hofsmo/psse_models
|
setup.py
|
Python
|
gpl-3.0
| 223
| 0.004484
|
# Thanks to Kurt Othmer for the BioExplorer design that this is translated from
from flow import *
class Flow(object):
def init(self, context):
ch1 = context.get_channel('Channel 1')
#ch1 = Notch(50, input=ch1)
ch1_dc = DCBlock(ch1).ac
ch1_raw = BandPass(0.0, 40.0, input=ch1_dc)
ch1_theta = BandPass(3.0, 7.0, input=ch1_raw, type='elliptic', order=3).output
        ch1_beta = BandPass(15.0, 18.0, input=ch1_raw, type='elliptic', order=3).output
ch1_hibeta = BandPass(22, 38.0, input=ch1_raw, type='elliptic', order=3).output
ch1_raw.set(label='Left Raw: 0-40', color='white')
ch1_theta.set(label='Left Theta', color='violet')
ch1_beta.set(label='Left Beta', color='green')
ch1_hibeta.set(label='Left Hi Beta', color='yellow')
self.ch1_theta_threshold = Threshold('L Theta', input=RMS(ch1_theta), mode='decrease', auto_target=90)
self.ch1_beta_threshold = Threshold('L Beta', input=RMS(ch1_beta), mode='range', low_target=90, high_target=95)
self.ch1_hibeta_threshold = Threshold('L Hi-Beta', input=RMS(ch1_hibeta), mode='decrease', auto_target=95)
self.ch1_osci = Oscilloscope('Left Side', moving=False,
channels=[ch1_raw, ch1_theta, ch1_beta, ch1_hibeta])
self.left_spectrum = BarSpectrogram('Left', lo=2.0, hi=30.0, input=ch1_raw, align='right')
ch2 = context.get_channel('Channel 2')
#ch2 = Notch(50, input=ch2)
ch2_dc = DCBlock(ch2).ac
ch2_raw = BandPass(0.0, 40.0, input=ch2_dc)
ch2_theta = BandPass(3.0, 7.0, input=ch2_raw, type='elliptic', order=3).output
        ch2_smr = BandPass(12.0, 15.0, input=ch2_raw, type='elliptic', order=3).output
ch2_hibeta = BandPass(22, 38.0, input=ch2_raw, type='elliptic', order=3).output
ch2_raw.set(label='Right Raw: 0-40', color='white')
ch2_theta.set(label='Right Theta', color='violet')
ch2_smr.set(label='Right SMR', color='blue')
ch2_hibeta.set(label='Right Hi Beta', color='yellow')
self.ch2_theta_threshold = Threshold('R Theta', input=RMS(ch2_theta), mode='decrease', auto_target=90)
self.ch2_smr_threshold = Threshold('R SMR', input=RMS(ch2_smr), mode='range', low_target=90, high_target=95)
self.ch2_hibeta_threshold = Threshold('R Hi-Beta', input=RMS(ch2_hibeta), mode='decrease', auto_target=95)
self.ch2_osci = Oscilloscope('Right Side', moving=False,
channels=[ch2_raw, ch2_theta, ch2_smr, ch2_hibeta])
self.right_spectrum = BarSpectrogram('Right', lo=2.0, hi=30.0, input=ch2_raw, align='left')
and_cond = Expression(lambda *args: all(args),
self.ch1_theta_threshold.passfail, self.ch1_beta_threshold.passfail, self.ch1_hibeta_threshold.passfail,
#self.ch2_theta_threshold.passfail, self.ch2_smr_threshold.passfail, self.ch2_hibeta_threshold.passfail
)
video_path = '/Users/jonathansieber/Movies/Adventure.Time.S06E22.The.Cooler.720p.HDTV.x264-W4F.mkv'
self.video = MPlayerControl(video_path, enable=and_cond)
def widget(self):
w = QtGui.QWidget()
layout = QtGui.QGridLayout()
w.setLayout(layout)
layout.addWidget(self.ch1_osci.widget(), 0, 0, 1, 4)
layout.addWidget(self.ch1_theta_threshold.widget(), 1, 0)
layout.addWidget(self.ch1_beta_threshold.widget(), 1, 1)
layout.addWidget(self.ch1_hibeta_threshold.widget(), 1, 2)
layout.addWidget(self.left_spectrum.widget(), 1, 3)
layout.addWidget(self.ch2_osci.widget(), 0, 4, 1, 4)
layout.addWidget(self.ch2_theta_threshold.widget(), 1, 5)
layout.addWidget(self.ch2_smr_threshold.widget(), 1, 6)
layout.addWidget(self.ch2_hibeta_threshold.widget(), 1, 7)
layout.addWidget(self.right_spectrum.widget(), 1, 4)
return w
def flow():
return Flow()
|
strfry/OpenNFB
|
protocols/2_ch_c3beta_c4smr_kro.py
|
Python
|
gpl-3.0
| 3,567
| 0.024951
|
#!/usr/bin/env python
"""
A simple interface to download Sentinel-1 and Sentinel-2 datasets from
the COPERNICUS Sentinel Hub.
"""
from functools import partial
import hashlib
import os
import datetime
import sys
import xml.etree.cElementTree as ET
import re
import requests
from concurrent import futures
import logging
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
# hub_url = "https://scihub.copernicus.eu/dhus/search?q="
#hub_url = "https://scihub.copernicus.eu/s3hub/search?q="
hub_url= "https://scihub.copernicus.eu/apihub/search?q="
requests.packages.urllib3.disable_warnings()
def calculate_md5(fname):
hasher = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest().upper()
def do_query(query, user="guest", passwd="guest"):
"""
A simple function to pass a query to the Sentinel scihub website. If
successful this function will return the XML file back for further
processing.
query: str
A query string, such as "https://scihub.copernicus.eu/dhus/odata/v1/"
"Products?$orderby=IngestionDate%20desc&$top=100&$skip=100"
Returns:
The relevant XML file, or raises error
"""
r = requests.get(query, auth=(user, passwd), verify=False)
if r.status_code == 200:
return r.text
else:
raise IOError("Something went wrong! Error code %d" % r.status_code)
def download_product(source, target, user="guest", passwd="guest"):
"""
Download a product from the SentinelScihub site, and save it to a named
local disk location given by ``target``.
source: str
A product fully qualified URL
target: str
A filename where to download the URL specified
"""
md5_source = source.replace("$value", "/Checksum/Value/$value")
r = requests.get(md5_source, auth=(user, passwd), verify=False)
md5 = r.text
if os.path.exists(target):
md5_file = calculate_md5(target)
if md5 == md5_file:
return
chunks = 1048576 # 1MiB...
while True:
LOG.debug("Getting %s" % source)
r = requests.get(source, auth=(user, passwd), stream=True,
verify=False)
if not r.ok:
raise IOError("Can't start download... [%s]" % source)
file_size = int(r.headers['content-length'])
LOG.info("Downloading to -> %s" % target)
LOG.info("%d bytes..." % file_size)
with open(target, 'wb') as fp:
cntr = 0
dload = 0
for chunk in r.iter_content(chunk_size=chunks):
if chunk:
cntr += 1
if cntr > 100:
dload += cntr * chunks
LOG.info("\tWriting %d/%d [%5.2f %%]" % (dload, file_size,
100. * float(dload) /
float(file_size)))
sys.stdout.flush()
cntr = 0
fp.write(chunk)
fp.flush()
os.fsync(fp)
md5_file = calculate_md5(target)
if md5_file == md5:
break
return
def parse_xml(xml):
"""
    Parse an OData XML file to harvest some relevant information about the
    products available. It will return a list of dictionaries, with one
    dictionary per product returned from the query. Each dictionary will have
    a number of keys (see ``fields_of_interest``), as well as ``link`` and
    ``quicklook`` entries.
"""
fields_of_interest = ["filename", "identifier", "instrumentshortname",
"orbitnumber", "orbitdirection", "producttype",
"beginposition", "endposition"]
tree = ET.ElementTree(ET.fromstring(xml))
# Search for all the acquired images...
granules = []
for elem in tree.iter(tag="{http://www.w3.org/2005/Atom}entry"):
granule = {}
for img in elem.getchildren():
if img.tag.find("id") >= 0:
granule['id'] = img.text
            if img.tag.find("link") >= 0 and img.attrib.has_key("href"):
if img.attrib['href'].find("Quicklook") >= 0:
granule['quicklook'] = img.attrib['href']
elif img.attrib['href'].find("$value") >= 0:
granule['link'] = img.attrib['href'].replace("$value", "")
if img.attrib.has_key("name"):
if img.attrib['name'] in fields_of_interest:
granule[img.attrib['name']] = img.text
granules.append(granule)
return granules
# print img.tag, img.attrib, img.text
# for x in img.getchildren():
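
# A sketch of what one returned granule dict can look like (values invented for
# illustration; the exact keys depend on the hub's response):
#
#   {'id': 'https://scihub.copernicus.eu/apihub/odata/v1/Products(...)',
#    'link': 'https://scihub.copernicus.eu/apihub/odata/v1/Products(...)/',
#    'quicklook': '.../Products(...)/Products("Quicklook")/$value',
#    'filename': 'S2A_MSIL1C_20170101T000000.SAFE',
#    'identifier': 'S2A_MSIL1C_20170101T000000',
#    'producttype': 'S2MSI1C',
#    'beginposition': '2017-01-01T00:00:00.000Z'}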
def download_sentinel(location, input_start_date, input_sensor, output_dir,
input_end_date=None, username="guest", password="guest"):
input_sensor = input_sensor.upper()
sensor_list = ["S1", "S2", "S3"]
if not input_sensor in sensor_list:
raise ValueError("Sensor can only be S1, S2 or S3. You provided %s"
% input_sensor)
else:
if input_sensor.upper() == "S1":
sensor = "Sentinel-1"
elif input_sensor.upper() == "S2":
sensor = "Sentinel-2"
elif input_sensor.upper() == "S3":
sensor= "Sentinel-3"
sensor_str = 'platformname:%s' % sensor
#sensor_str = 'filename:%s' % input_sensor.upper()
try:
start_date = datetime.datetime.strptime(input_start_date,
"%Y.%m.%d").isoformat()
except ValueError:
try:
start_date = datetime.datetime.strptime(input_start_date,
"%Y-%m-%d").isoformat()
except ValueError:
start_date = datetime.datetime.strptime(input_start_date,
"%Y/%j").isoformat()
start_date = start_date + "Z"
if input_end_date is None:
end_date = "NOW"
else:
try:
end_date = datetime.datetime.strptime(input_end_date,
"%Y.%m.%d").isoformat()
except ValueError:
try:
end_date = datetime.datetime.strptime(input_end_date,
"%Y-%m-%d").isoformat()
except ValueError:
end_date = datetime.datetime.strptime(input_end_date,
"%Y/%j").isoformat()
if len(location) == 2:
location_str = 'footprint:"Intersects(%f, %f)"' % (location[0], location[1])
    elif len(location) == 4:
        location_str = ('footprint:"Intersects(POLYGON(('
                        '%f %f, %f %f, %f %f, %f %f, %f %f)))"') % (
                            location[0], location[0],
                            location[0], location[1],
                            location[1], location[1],
                            location[1], location[0],
                            location[0], location[0])
time_str = 'beginposition:[%s TO %s]' % (start_date, end_date)
query = "%s AND %s AND %s" % (location_str, time_str, sensor_str)
query = "%s%s" % (hub_url, query)
# query = "%s%s" % ( hub_url, urllib2.quote(query ) )
LOG.debug(query)
result = do_query(query, user=username, passwd=password)
granules = parse_xml(result)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
ret_files = []
for granule in granules:
download_product(granule['link'] + "$value", os.path.join(output_dir,
granule['filename'].replace("SAFE", "zip")),
user=username, passwd=password)
ret_files.append(os.path.join(output_dir,
granule['filename'].replace("SAFE", "zip")))
return granules, ret_files
if __name__ == "__main__": # location = (43.3650, -8.4100)
# input_start_date = "2015.01.01"
# input_end_date = None
# username = "guest"
# password = "guest"
# input_sensor = "S2"
# output_dir = "/data/selene/ucfajlg/tmp/"
# granules, retfiles = download_sentinel ( location, input_start_date,
# input_sensor, output_dir )
lng = -8.4100
lat = 43.3650
#lat = 39.0985 # Barrax
#lng = -2.1082
#lat = 28.55 # Libya 4
#lng = 23.39
print "Testing S2 on COPERNICUS scientific hub"
location=(lat,lng)
input_start_date="2017.1.1"
input_sensor="S3"
output_dir="/tmp/"
username="s3guest"
password="s3guest"
print "Set username and password variables for Sentinel hub!!!"
download_sentinel(location, input_start_date, input_sensor, output_dir,
input_end_date=None, username=username, password=password)
|
jgomezdans/grabba_grabba_hey
|
grabba_grabba_hey/sentinel3_downloader.py
|
Python
|
gpl-2.0
| 9,042
| 0.003981
|
#!/usr/bin/env python
# Copyright (c) 2012-2013 Turbulenz Limited
from logging import basicConfig, CRITICAL, INFO, WARNING
import argparse
from urllib3 import connection_from_url
from urllib3.exceptions import HTTPError, SSLError
from simplejson import loads as json_loads, dump as json_dump
from gzip import GzipFile
from zlib import decompress as zlib_decompress
from time import strptime, strftime, gmtime
from calendar import timegm
from re import compile as re_compile
from sys import stdin, argv
from os import mkdir
from os.path import exists as path_exists, join as path_join, normpath
from getpass import getpass, GetPassWarning
from base64 import urlsafe_b64decode
__version__ = '2.1.2'
__dependencies__ = []
HUB_COOKIE_NAME = 'hub'
HUB_URL = 'https://hub.turbulenz.com/'
DATATYPE_DEFAULT = 'events'
DATATYPE_URL = { 'events': '/dynamic/project/%s/event-log',
'users': '/dynamic/project/%s/user-info' }
DAY = 86400
TODAY_START = (timegm(gmtime()) / DAY) * DAY
# pylint: disable=C0301
USERNAME_PATTERN = re_compile('^[a-z0-9]+[a-z0-9-]*$') # usernames
PROJECT_SLUG_PATTERN = re_compile('^[a-zA-Z0-9\-]*$') # game
# pylint: enable=C0301
class DateRange(object):
"""Maintain a time range between two dates. If only a start time is given it will generate a 24 hour period
starting at that time. Defaults to the start of the current day if no times are given"""
def __init__(self, start=TODAY_START, end=None):
self.start = start
if end:
self.end = end
else:
self.end = start + DAY
if self.start > self.end:
raise ValueError('Start date can\'t be greater than the end date')
def _range_str(t):
if t % DAY:
return strftime('%Y-%m-%d %H:%M:%SZ', gmtime(t))
else:
return strftime('%Y-%m-%d', gmtime(t))
self.start_str = _range_str(self.start)
if self.end % DAY:
self.end_str = _range_str(self.end)
else:
self.end_str = _range_str(self.end - DAY)
def filename_str(self):
if self.start_str == self.end_str:
return self.start_str
elif int(self.start / DAY) == int(self.end / DAY):
result = '%s_-_%s' % (strftime('%Y-%m-%d %H:%M:%SZ', gmtime(self.start)),
strftime('%Y-%m-%d %H:%M:%SZ', gmtime(self.end)))
return result.replace(' ', '_').replace(':', '-')
else:
result = '%s_-_%s' % (self.start_str, self.end_str)
return result.replace(' ', '_').replace(':', '-')
@staticmethod
def parse(range_str):
date_format = '%Y-%m-%d'
range_parts = range_str.split(':')
if len(range_parts) < 1:
error('Date not set')
exit(1)
elif len(range_parts) > 2:
error('Can\'t provide more than two dates for date range')
exit(1)
try:
start = int(timegm(strptime(range_parts[0], date_format)))
end = None
if len(range_parts) == 2:
end = int(timegm(strptime(range_parts[1], date_format))) + DAY
except ValueError:
error('Dates must be in the yyyy-mm-dd format')
exit(1)
return DateRange(start, end)
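
# Usage sketch (the dates are arbitrary examples):
#
#   DateRange.parse('2013-01-01')             # the whole of 2013-01-01
#   DateRange.parse('2013-01-01:2013-01-07')  # 2013-01-01 through the end of 2013-01-07
#   DateRange()                               # defaults to the current UTC day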
def log(message, new_line=True):
print '\r >> %s' % message,
if new_line:
print
def error(message):
log('[ERROR] - %s' % message)
def warning(message):
log('[WARNING] - %s' % message)
def _parse_args():
parser = argparse.ArgumentParser(description="Export event logs and anonymised user information of a game.")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose output")
parser.add_argument("-s", "--silent", action="store_true", help="silent running")
parser.add_argument("--version", action='version', version=__version__)
parser.add_argument("-u", "--user", action="store",
help="Hub login username (will be requested if not provided)")
parser.add_argument("-p", "--password", action="store",
help="Hub login password (will be requested if not provided)")
parser.add_argument("-t", "--type", action="store", default=DATATYPE_DEFAULT,
help="type of data to download, either events or users (defaults to " + DATATYPE_DEFAULT + ")")
parser.add_argument("-d", "--daterange", action="store", default=TODAY_START,
help="individual 'yyyy-mm-dd' or range 'yyyy-mm-dd : yyyy-mm-dd' of dates to get the data " \
"for (defaults to today)")
parser.add_argument("-o", "--outputdir", action="store", default="",
help="folder to output the downloaded files to (defaults to current directory)")
parser.add_argument("-w", "--overwrite", action="store_true",
help="if a file to be downloaded exists in the output directory, " \
"overwrite instead of skipping it")
parser.add_argument("--indent", action="store_true", help="apply indentation to the JSON output")
parser.add_argument("--hub", default=HUB_URL, help="Hub url (defaults to https://hub.turbulenz.com/)")
parser.add_argument("project", metavar='project_slug', help="Slug of Hub project you wish to download from")
args = parser.parse_args(argv[1:])
if args.silent:
basicConfig(level=CRITICAL)
elif args.verbose:
basicConfig(level=INFO)
else:
basicConfig(level=WARNING)
if not PROJECT_SLUG_PATTERN.match(args.project):
error('Incorrect "project" format')
exit(-1)
username = args.user
if not username:
print 'Username: ',
username = stdin.readline()
if not username:
error('Login information required')
exit(-1)
username = username.strip()
args.user = username
if not USERNAME_PATTERN.match(username):
error('Incorrect "username" format')
exit(-1)
if not args.password:
try:
args.password = getpass()
except GetPassWarning:
error('Echo free password entry unsupported. Please provide a --password argument')
return -1
if args.type not in ['events', 'users']:
error('Type must be one of \'events\' or \'users\'')
exit(1)
if isinstance(args.daterange, int):
args.daterange = DateRange(args.daterange)
else:
args.daterange = DateRange.parse(args.daterange)
return args
def login(connection, options):
username = options.user
password = options.password
if not options.silent:
log('Login as "%s".' % username)
credentials = {'login': username,
'password': password,
'source': '/tool'}
try:
r = connection.request('POST',
'/dynamic/login',
fields=credentials,
retries=1,
redirect=False)
except (HTTPError, SSLError):
error('Connection to Hub failed!')
exit(-1)
if r.status != 200:
if r.status == 301:
redirect_location = r.headers.get('location', '')
end_domain = redirect_location.find('/dynamic/login')
error('Login is being redirected to "%s". Please verify the Hub URL.' % redirect_location[:end_domain])
else:
error('Wrong user login information!')
exit(-1)
cookie = r.headers.get('set-cookie', None)
login_info = json_loads(r.data)
# pylint: disable=E1103
if not cookie or HUB_COOKIE_NAME not in cookie or login_info.get('source') != credentials['source']:
error('Hub login failed!')
exit(-1)
# pylint: enable=E1103
return cookie
def logout(connection, cookie):
try:
connection.request('POST',
'/dynamic/logout',
headers={'Cookie': cookie},
redirect=False)
except (HTTPError, SSLError) as e:
error(str(e))
def _request_data(options):
daterange = options.daterange
params = { 'start_time': daterange.start,
'end_time': daterange.end,
'version': __version__ }
connection = connection_from_url(options.hub, timeout=8.0)
cookie = login(connection, options)
try:
r = connection.request('GET',
DATATYPE_URL[options.type] % options.project,
headers={'Cookie': cookie,
'Accept-Encoding': 'gzip'},
fields=params,
redirect=False)
except (HTTPError, SSLError) as e:
error(e)
exit(-1)
# pylint: disable=E1103
r_data = json_loads(r.data)
if r.status != 200:
error_msg = 'Wrong Hub answer.'
if r_data.get('msg', None):
error_msg += ' ' + r_data['msg']
if r.status == 403:
error_msg += ' Make sure the project you\'ve specified exists and you have access to it.'
error(error_msg)
exit(-1)
# pylint: enable=E1103
if options.verbose:
log('Data received from the hub')
log('Logging out')
logout(connection, cookie)
return r_data
def write_to_file(options, data, filename=None, output_path=None, force_overwrite=False):
if not filename:
filename = '%s-%s-%s.json' % (options.project, options.type, options.daterange.filename_str())
try:
if not output_path:
output_path = normpath(path_join(options.outputdir, filename))
if path_exists(output_path):
if options.overwrite or force_overwrite:
if not options.silent:
warning('Overwriting existing file: %s' % output_path)
elif not options.silent:
warning('Skipping existing file: %s' % output_path)
return
indentation = None
if options.indent:
indentation = 4
if isinstance(data, str):
data = json_loads(data)
with open(output_path, 'wb') as fout:
if isinstance(data, str):
fout.write(data)
else:
json_dump(data, fout, indent=indentation)
if options.verbose:
log('Finished writing to: %s' % output_path)
except (IOError, OSError) as e:
error(e)
exit(-1)
try:
# pylint: disable=F0401
from Crypto.Cipher.AES import new as aes_new, MODE_CBC
# pylint: enable=F0401
def decrypt_data(data, key):
# Need to use a key of length 32 bytes for AES-256
if len(key) != 32:
error('Invalid key length for AES-256')
exit(-1)
# IV is last 16 bytes
iv = data[-16 :]
data = data[: -16]
data = aes_new(key, MODE_CBC, iv).decrypt(data)
# Strip PKCS7 padding required for CBC
if len(data) % 16:
error('Corrupted data - invalid length')
exit(-1)
num_padding = ord(data[-1])
if num_padding > 16:
error('Corrupted data - invalid padding')
exit(-1)
return data[: -num_padding]
except ImportError:
from io import BytesIO
from subprocess import Popen, STDOUT, PIPE
from struct import pack
def decrypt_data(data, key):
# Need to use a key of length 32 bytes for AES-256
if len(key) != 32:
error('Invalid key length for AES-256')
exit(-1)
aesdata = BytesIO()
aesdata.write(key)
aesdata.write(pack('I', len(data)))
aesdata.write(data)
process = Popen('aesdecrypt', stderr=STDOUT, stdout=PIPE, stdin=PIPE, shell=True)
output, _ = process.communicate(input=aesdata.getvalue())
retcode = process.poll()
if retcode != 0:
error('Failed to run aesdecrypt, check it is on the path or install PyCrypto')
exit(-1)
return str(output)
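
# Wire format fed to the external `aesdecrypt` helper above (a description of
# what this fallback writes, not a documented interface): 32 raw key bytes,
# then a native-endian 4-byte length (struct 'I'), then the ciphertext.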
def get_log_files_local(options, files_list, enc_key):
verbose = options.verbose
silent = options.silent
overwrite = options.overwrite
output_dir = options.outputdir
filename_prefix = options.project + '-'
try:
for filename in files_list:
if filename.startswith('http'):
error('Unexpected file to retrieve')
exit(-1)
# Format v1: 'eventlogspath/gamefolder/events-yyyy-mm-dd.json.gz'
# Format v2: 'eventlogspath/gamefolder/events-yyyy-mm-dd.bin'
# Convert to 'gameslug-events-yyyy-mm-dd.json'
filename_patched = filename_prefix + filename.rsplit('/', 1)[-1].split('.', 1)[0] + '.json'
output_path = normpath(path_join(output_dir, filename_patched))
if not overwrite and path_exists(output_path):
if not silent:
warning('Skipping existing file: %s' % output_path)
continue
if verbose:
log('Retrieving file: %s' % filename_patched)
if filename.endswith('.bin'):
with open(filename, 'rb') as fin:
file_content = fin.read()
file_content = decrypt_data(file_content, enc_key)
file_content = zlib_decompress(file_content)
else: # if filename.endswith('.json.gz'):
gzip_file = GzipFile(filename=filename, mode='rb')
file_content = gzip_file.read()
gzip_file.close()
file_content = decrypt_data(file_content, enc_key)
write_to_file(options, file_content, filename=filename_patched, output_path=output_path)
except (IOError, OSError) as e:
error(e)
exit(-1)
def get_log_files_s3(options, files_list, enc_key, connection):
verbose = options.verbose
silent = options.silent
overwrite = options.overwrite
output_dir = options.outputdir
filename_prefix = options.project + '-'
try:
for filename in files_list:
# Format v1: 'https://bucket.s3.amazonaws.com/gamefolder/events-yyyy-mm-dd.json?AWSAccessKeyId=keyid
# &Expires=timestamp&Signature=signature'
# Format v2: 'https://bucket.s3.amazonaws.com/gamefolder/events-yyyy-mm-dd.bin?AWSAccessKeyId=keyid
# &Expires=timestamp&Signature=signature'
# Convert to 'gameslug-events-yyyy-mm-dd.json'
filename_cleaned = filename.split('?', 1)[0].rsplit('/', 1)[-1]
filename_patched = filename_prefix + filename_cleaned.split('.', 1)[0] + '.json'
output_path = normpath(path_join(output_dir, filename_patched))
if not overwrite and path_exists(output_path):
if not silent:
warning('Skipping existing file: %s' % output_path)
continue
if verbose:
log('Requesting file: %s' % filename_patched)
r = connection.request('GET', filename, redirect=False)
# pylint: disable=E1103
if r.status != 200:
error_msg = 'Couldn\'t download %s.' % filename_patched
if r.data.get('msg', None):
error_msg += ' ' + r.data['msg']
error(str(r.status) + error_msg)
exit(-1)
# pylint: enable=E1103
r_data = decrypt_data(r.data, enc_key)
if filename_cleaned.endswith('.bin'):
r_data = zlib_decompress(r_data)
# Format v1 file gets uncompressed on download so we just decrypt it
write_to_file(options, r_data, filename=filename_patched, output_path=output_path)
except (HTTPError, SSLError) as e:
error(e)
exit(-1)
def get_objectid_timestamp(objectid):
return int(str(objectid)[0:8], 16)
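
# For example, with a hypothetical ObjectId the leading 8 hex digits encode the
# creation time as seconds since the epoch:
#
#   get_objectid_timestamp('507f1f77bcf86cd799439011')  # -> 1350508407 (2012-10-17 UTC)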
def inline_array_events_local(options, today_log, array_files_list, enc_key):
verbose = options.verbose
to_sort = set()
try:
index = 0
for index, filename in enumerate(array_files_list):
# Format: 'eventlogspath/gamefolder/arrayevents/date(seconds)/objectid.bin'
# The objectid doesn't correspond to a database entry but is used for uniqueness and timestamp
filename = filename.replace('\\', '/')
event_objectid = filename.rsplit('/', 1)[-1].split('.', 1)[0]
timestamp = get_objectid_timestamp(event_objectid)
formatted_timestamp = strftime('%Y-%m-%d %H:%M:%S', gmtime(timestamp))
if verbose:
log('Retrieving events file ' + str(index + 1) + ' submitted at ' + formatted_timestamp)
with open(filename, 'rb') as fin:
file_content = fin.read()
file_content = decrypt_data(file_content, enc_key)
file_content = json_loads(zlib_decompress(file_content))
if not isinstance(file_content, list):
file_content = [file_content]
for event in file_content:
slug = event['slug']
del event['slug']
event['time'] = strftime('%Y-%m-%d %H:%M:%S', gmtime(event['time']))
if slug not in today_log:
today_log[slug] = { 'playEvents': [], 'customEvents': [] }
today_log[slug]['customEvents'].append(event)
                # Maintain a set of slugs whose customEvents need re-sorting by date, so that added array events
                # appear in order, without unnecessarily sorting large lists that received no array events
to_sort.add(slug)
for slug in to_sort:
today_log[slug]['customEvents'].sort(key=lambda k: k['time'])
return today_log
except (IOError, OSError) as e:
error(e)
exit(-1)
def inline_array_events_s3(options, today_log, array_files_list, enc_key, connection):
verbose = options.verbose
to_sort = set()
try:
for index, filename in enumerate(array_files_list):
# Format: 'https://bucket.s3.amazonaws.com/gamefolder/arrayevents/date(seconds)/objectid.bin?
# AWSAccessKeyId=keyid&Expires=timestamp&Signature=signature'
            # The objectid doesn't correspond to a database entry but is used for uniqueness and timestamp
filename_cleaned = filename.split('?', 1)[0].rsplit('/', 1)[-1]
event_objectid = filename_cleaned.split('.', 1)[0]
timestamp = get_objectid_timestamp(event_objectid)
formatted_timestamp = strftime('%Y-%m-%d %H:%M:%S', gmtime(timestamp))
if verbose:
log('Requesting events file ' + str(index + 1) + ' submitted at ' + formatted_timestamp)
r = connection.request('GET', filename, redirect=False)
# pylint: disable=E1103
if r.status != 200:
error_msg = 'Couldn\'t download event %d.' % (index + 1)
if r.data.get('msg', None):
error_msg += ' ' + r.data['msg']
error(str(r.status) + error_msg)
exit(-1)
# pylint: enable=E1103
r_data = decrypt_data(r.data, enc_key)
r_data = json_loads(zlib_decompress(r_data))
if not isinstance(r_data, list):
r_data = [r_data]
for event in r_data:
slug = event['slug']
del event['slug']
event['time'] = strftime('%Y-%m-%d %H:%M:%S', gmtime(event['time']))
if slug not in today_log:
today_log[slug] = { 'playEvents': [], 'customEvents': [] }
today_log[slug]['customEvents'].append(event)
                # Maintain a set of slugs whose customEvents need re-sorting by date, so that added array events
                # appear in order, without unnecessarily sorting large lists that received no array events
to_sort.add(slug)
for slug in to_sort:
today_log[slug]['customEvents'].sort(key=lambda k: k['time'])
return today_log
except (HTTPError, SSLError) as e:
error(e)
exit(-1)
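
# Shape of the patched today_log (illustrative): each game slug maps to its
# event lists, with the downloaded array events inlined into 'customEvents'
# in time order, e.g.
#
#   {'my-game': {'playEvents': [...],
#                'customEvents': [{'time': '2013-01-01 12:00:00', ...}]}}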
def patch_and_write_today_log(options, resp_daterange, today_log, array_files_list, enc_key, connection):
today_range = DateRange(int(resp_daterange.end / DAY) * DAY, int(resp_daterange.end))
filename = '%s-%s-%s.json' % (options.project, options.type, today_range.filename_str())
output_path = normpath(path_join(options.outputdir, filename))
if not options.overwrite and path_exists(output_path):
if not options.silent:
# Confirm skip as does not make sense to request today's data just to skip overwriting it locally
log('Overwriting is disabled. Are you sure you want to skip overwriting today\'s downloaded log? ' \
'(Press \'y\' to skip or \'n\' to overwrite)')
skip_options = ['y', 'n']
for attempt in xrange(1, 4): # default to skip after three bad attempts
log('', new_line=False)
skip = stdin.readline().strip().lower()
if skip in skip_options:
break
error('Please answer with \'y\' or \'n\'. (Attempt %d of 3)' % attempt)
if 'n' != skip:
warning('Skipping overwriting today\'s downloaded file: %s' % output_path)
return
else:
warning('Overwrite disabled but overwriting today\'s downloaded file: %s' % output_path)
else: # Do not ask in silent mode, default to the option passed
return
if array_files_list:
if options.verbose:
log('Patching today\'s log file to include array events')
if connection:
today_log = inline_array_events_s3(options, today_log, array_files_list, enc_key, connection)
else:
today_log = inline_array_events_local(options, today_log, array_files_list, enc_key)
write_to_file(options, today_log, filename=filename, output_path=output_path, force_overwrite=True)
# pylint: disable=E1103
def main():
options = _parse_args()
silent = options.silent
if not silent:
log('Downloading \'%s\' to %s.' % (options.type, options.outputdir or 'current directory'))
try:
r_data = _request_data(options)
try:
response_daterange = DateRange(r_data['start_time'], r_data['end_time'])
datatype = options.type
if 'users' == datatype:
user_data = r_data['user_data']
else: # if 'events' == datatype
logs_url = r_data['logs_url']
files_list = r_data['files_list']
array_files_list = r_data['array_files_list']
enc_key = r_data['key']
if enc_key is not None:
# enc_key can be a unicode string and we need a stream of ascii bytes
enc_key = urlsafe_b64decode(enc_key.encode('ascii'))
today_log = r_data['today_log']
except KeyError as e:
error('Missing information in response: %s' % e)
exit(-1)
del r_data
daterange = options.daterange
if not silent:
if response_daterange.start != daterange.start:
warning('Start date used (%s) not the same as what was specified (%s)' % \
(response_daterange.start_str, daterange.start_str))
if response_daterange.end != daterange.end:
warning('End date used (%s) not the same as what was specified (%s)' % \
(response_daterange.end_str, daterange.end_str))
options.daterange = response_daterange
output_dir = options.outputdir
if output_dir and not path_exists(output_dir):
# Not allowing creation of nested directories as greater chance of typos and misplaced files
mkdir(output_dir)
if 'users' == datatype:
write_to_file(options, user_data)
else: # if 'events' == datatype
connection = None
if logs_url and (files_list or array_files_list):
connection = connection_from_url(logs_url, timeout=8.0)
if files_list:
if logs_url:
get_log_files_s3(options, files_list, enc_key, connection)
else:
get_log_files_local(options, files_list, enc_key)
del files_list
if response_daterange.end > TODAY_START:
# Patch and write, if requested, today's log with the array events downloaded and inlined
patch_and_write_today_log(options, response_daterange, today_log, array_files_list, enc_key, connection)
del today_log
del array_files_list
if not silent:
log('Export completed successfully')
except KeyboardInterrupt:
if not silent:
warning('Program stopped by user')
exit(-1)
except OSError as e:
error(str(e))
exit(-1)
except Exception as e:
error(str(e))
exit(-1)
return 0
# pylint: enable=E1103
if __name__ == "__main__":
exit(main())
|
turbulenz/turbulenz_tools
|
turbulenz_tools/tools/exportevents.py
|
Python
|
mit
| 25,568
| 0.003559
|
"""
Unit tests for LMS instructor-initiated background tasks.
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import json
from uuid import uuid4
from itertools import cycle, chain, repeat
from mock import patch, Mock
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPAuthenticationError
from boto.ses.exceptions import (
SESAddressNotVerifiedError,
SESIdentityNotVerifiedError,
SESDomainNotConfirmedError,
SESAddressBlacklistedError,
SESDailyQuotaExceededError,
SESMaxSendingRateExceededError,
SESDomainEndsWithDotError,
SESLocalAddressCharacterError,
SESIllegalAddressError,
)
from boto.exception import AWSConnectionError
from celery.states import SUCCESS, FAILURE
from django.conf import settings
from django.core.management import call_command
from bulk_email.models import CourseEmail, Optout, SEND_TO_ALL, SEND_TO_ALL_INCLUDE_OPTOUT
from bulk_email.tasks import _filter_optouts_from_recipients
from instructor_task.tasks import send_bulk_course_email
from instructor_task.subtasks import update_subtask_status, SubtaskStatus
from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import InstructorTaskCourseTestCase
from instructor_task.tests.factories import InstructorTaskFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.models import UserStanding
class TestTaskFailure(Exception):
"""Dummy exception used for unit tests."""
pass
def my_update_subtask_status(entry_id, current_task_id, new_subtask_status):
"""
Check whether a subtask has been updated before really updating.
Check whether a subtask which has been retried
has had the retry already write its results here before the code
that was invoking the retry had a chance to update this status.
This is the norm in "eager" mode (used by tests) where the retry is called
and run to completion before control is returned to the code that
invoked the retry. If the retries eventually end in failure (e.g. due to
a maximum number of retries being attempted), the "eager" code will return
the error for each retry as it is popped off the stack. We want to just ignore
the later updates that are called as the result of the earlier retries.
This should not be an issue in production, where status is updated before
a task is retried, and is then updated afterwards if the retry fails.
"""
entry = InstructorTask.objects.get(pk=entry_id)
subtask_dict = json.loads(entry.subtasks)
subtask_status_info = subtask_dict['status']
current_subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])
current_retry_count = current_subtask_status.get_retry_count()
new_retry_count = new_subtask_status.get_retry_count()
if current_retry_count <= new_retry_count:
update_subtask_status(entry_id, current_task_id, new_subtask_status)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
class TestBulkEmailInstructorTask(InstructorTaskCourseTestCase):
"""Tests instructor task that send bulk email."""
def setUp(self):
super(TestBulkEmailInstructorTask, self).setUp()
self.initialize_course()
self.instructor = self.create_instructor('instructor')
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
def _create_input_entry(self, course_id=None, to_option=None):
"""
Creates a InstructorTask entry for testing.
Overrides the base class version in that this creates CourseEmail.
"""
to_option = to_option or SEND_TO_ALL
course_id = course_id or self.course.id
course_email = CourseEmail.create(course_id, self.instructor, to_option, "Test Subject", "<p>This is a test message</p>")
task_input = {'email_id': course_email.id} # pylint: disable=no-member
task_id = str(uuid4())
instructor_task = InstructorTaskFactory.create(
course_id=course_id,
requester=self.instructor,
task_input=json.dumps(task_input),
task_key='dummy value',
task_id=task_id,
)
return instructor_task
def _run_task_with_mock_celery(self, task_class, entry_id, task_id):
"""Submit a task and mock how celery provides a current_task."""
mock_current_task = Mock()
mock_current_task.max_retries = settings.BULK_EMAIL_MAX_RETRIES
mock_current_task.default_retry_delay = settings.BULK_EMAIL_DEFAULT_RETRY_DELAY
task_args = [entry_id, {}]
with patch('bulk_email.tasks._get_current_task') as mock_get_task:
mock_get_task.return_value = mock_current_task
return task_class.apply(task_args, task_id=task_id).get()
def test_email_missing_current_task(self):
task_entry = self._create_input_entry()
with self.assertRaises(ValueError):
send_bulk_course_email(task_entry.id, {})
def test_email_undefined_course(self):
# Check that we fail when passing in a course that doesn't exist.
task_entry = self._create_input_entry(course_id=SlashSeparatedCourseKey("bogus", "course", "id"))
with self.assertRaises(ValueError):
self._run_task_with_mock_celery(send_bulk_course_email, task_entry.id, task_entry.task_id)
def test_bad_task_id_on_update(self):
task_entry = self._create_input_entry()
def dummy_update_subtask_status(entry_id, _current_task_id, new_subtask_status):
"""Passes a bad value for task_id to test update_subtask_status"""
bogus_task_id = "this-is-bogus"
update_subtask_status(entry_id, bogus_task_id, new_subtask_status)
with self.assertRaises(ValueError):
with patch('bulk_email.tasks.update_subtask_status', dummy_update_subtask_status):
send_bulk_course_email(task_entry.id, {}) # pylint: disable=no-member
def _create_students(self, num_students):
"""Create students for testing"""
return [self.create_student('robot%d' % i) for i in xrange(num_students)]
def _assert_single_subtask_status(self, entry, succeeded, failed=0, skipped=0, retried_nomax=0, retried_withmax=0):
"""Compare counts with 'subtasks' entry in InstructorTask table."""
subtask_info = json.loads(entry.subtasks)
# verify subtask-level counts:
self.assertEquals(subtask_info.get('total'), 1)
self.assertEquals(subtask_info.get('succeeded'), 1 if succeeded > 0 else 0)
self.assertEquals(subtask_info.get('failed'), 0 if succeeded > 0 else 1)
# verify individual subtask status:
subtask_status_info = subtask_info.get('status')
task_id_list = subtask_status_info.keys()
self.assertEquals(len(task_id_list), 1)
task_id = task_id_list[0]
subtask_status = subtask_status_info.get(task_id)
print("Testing subtask status: {}".format(subtask_status))
self.assertEquals(subtask_status.get('task_id'), task_id)
self.assertEquals(subtask_status.get('attempted'), succeeded + failed)
self.assertEquals(subtask_status.get('succeeded'), succeeded)
self.assertEquals(subtask_status.get('skipped'), skipped)
self.assertEquals(subtask_status.get('failed'), failed)
self.assertEquals(subtask_status.get('retried_nomax'), retried_nomax)
self.assertEquals(subtask_status.get('retried_withmax'), retried_withmax)
self.assertEquals(subtask_status.get('state'), SUCCESS if succeeded > 0 else FAILURE)
def _test_run_with_task(self, task_class, action_name, total, succeeded, failed=0, skipped=0, retried_nomax=0, retried_withmax=0):
"""Run a task and check the number of emails processed."""
task_entry = self._create_input_entry()
parent_status = self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
# check return value
self.assertEquals(parent_status.get('total'), total)
self.assertEquals(parent_status.get('action_name'), action_name)
# compare with task_output entry in InstructorTask table:
entry = InstructorTask.objects.get(id=task_entry.id)
status = json.loads(entry.task_output)
self.assertEquals(status.get('attempted'), succeeded + failed)
self.assertEquals(status.get('succeeded'), succeeded)
self.assertEquals(status.get('skipped'), skipped)
self.assertEquals(status.get('failed'), failed)
self.assertEquals(status.get('total'), total)
self.assertEquals(status.get('action_name'), action_name)
self.assertGreater(status.get('duration_ms'), 0)
self.assertEquals(entry.task_state, SUCCESS)
self._assert_single_subtask_status(entry, succeeded, failed, skipped, retried_nomax, retried_withmax)
return entry
    # TODO: duplicate of _test_run_with_task
def _test_run_with_entry(self, task_class, task_entry, action_name, total, succeeded, failed=0, skipped=0, retried_nomax=0, retried_withmax=0):
"""Run a task and check the number of emails processed."""
parent_status = self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
# check return value
self.assertEquals(parent_status.get('total'), total)
self.assertEquals(parent_status.get('action_name'), action_name)
# compare with task_output entry in InstructorTask table:
entry = InstructorTask.objects.get(id=task_entry.id)
status = json.loads(entry.task_output)
self.assertEquals(status.get('attempted'), succeeded + failed)
self.assertEquals(status.get('succeeded'), succeeded)
self.assertEquals(status.get('skipped'), skipped)
self.assertEquals(status.get('failed'), failed)
self.assertEquals(status.get('total'), total)
self.assertEquals(status.get('action_name'), action_name)
self.assertGreater(status.get('duration_ms'), 0)
self.assertEquals(entry.task_state, SUCCESS)
self._assert_single_subtask_status(entry, succeeded, failed, skipped, retried_nomax, retried_withmax)
return entry
def test_successful(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
self._create_students(num_emails - 1)
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, num_emails)
def test_successful_twice(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
self._create_students(num_emails - 1)
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
task_entry = self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, num_emails)
# submit the same task a second time, and confirm that it is not run again.
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([Exception("This should not happen!")])
parent_status = self._run_task_with_mock_celery(send_bulk_course_email, task_entry.id, task_entry.task_id)
self.assertEquals(parent_status.get('total'), num_emails)
self.assertEquals(parent_status.get('succeeded'), num_emails)
self.assertEquals(parent_status.get('failed'), 0)
def test_unactivated_user(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
students = self._create_students(num_emails - 1)
# mark a student as not yet having activated their email:
student = students[0]
student.is_active = False
student.save()
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails - 1, num_emails - 1)
def test_disabled_user(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
students = self._create_students(num_emails - 1)
# mark a student disabled:
student = students[0]
UserStanding.objects.create(user=student, account_status=UserStanding.ACCOUNT_DISABLED, changed_by=student)
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails - 1, num_emails - 1)
def test_skipped(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
students = self._create_students(num_emails - 1)
# have every fourth student optout:
expected_skipped = int((num_emails + 3) / 4.0)
expected_succeeds = num_emails - expected_skipped
for index in range(0, num_emails, 4):
Optout.objects.create(user=students[index], course_id=self.course.id)
# mark some students as opting out
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, expected_succeeds, skipped=expected_skipped)
def test_skipped_include_optout(self):
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
students = self._create_students(num_emails - 1)
# have every fourth student optout and every eighth student force disabled:
expected_skipped = int((num_emails + 7) / 8.0)
expected_succeeds = num_emails - expected_skipped
for index in range(0, num_emails, 4):
if index % 8 == 0:
Optout.objects.create(user=students[index], course_id=self.course.id, force_disabled=True)
else:
Optout.objects.create(user=students[index], course_id=self.course.id)
        # Mark some students as opting out. Only students who are force-disabled are skipped.
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
get_conn.return_value.send_messages.side_effect = cycle([None])
task_entry = self._create_input_entry(to_option=SEND_TO_ALL_INCLUDE_OPTOUT)
self._test_run_with_entry(send_bulk_course_email, task_entry, 'emailed', num_emails, expected_succeeds, skipped=expected_skipped)
def _test_email_address_failures(self, exception):
"""Test that celery handles bad address errors by failing and not retrying."""
# Select number of emails to fit into a single subtask.
num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK
# We also send email to the instructor:
self._create_students(num_emails - 1)
expected_fails = int((num_emails + 3) / 4.0)
expected_succeeds = num_emails - expected_fails
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
# have every fourth email fail due to some address failure:
get_conn.return_value.send_messages.side_effect = cycle([exception, None, None, None])
self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, expected_succeeds, failed=expected_fails)
def test_smtp_blacklisted_user(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SMTPDataError(554, "Email address is blacklisted"))
def test_ses_blacklisted_user(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SESAddressBlacklistedError(554, "Email address is blacklisted"))
def test_ses_illegal_address(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SESIllegalAddressError(554, "Email address is illegal"))
def test_ses_local_address_character_error(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SESLocalAddressCharacterError(554, "Email address contains a bad character"))
def test_ses_domain_ends_with_dot(self):
# Test that celery handles permanent SMTPDataErrors by failing and not retrying.
self._test_email_address_failures(SESDomainEndsWithDotError(554, "Email address ends with a dot"))
def _test_retry_after_limited_retry_error(self, exception):
"""Test that celery handles connection failures by retrying."""
# If we want the batch to succeed, we need to send fewer emails
# than the max retries, so that the max is not triggered.
num_emails = settings.BULK_EMAIL_MAX_RETRIES
# We also send email to the instructor:
self._create_students(num_emails - 1)
expected_fails = 0
expected_succeeds = num_emails
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
# Have every other mail attempt fail due to disconnection.
get_conn.return_value.send_messages.side_effect = cycle([exception, None])
self._test_run_with_task(
send_bulk_course_email,
'emailed',
num_emails,
expected_succeeds,
failed=expected_fails,
retried_withmax=num_emails
)
def _test_max_retry_limit_causes_failure(self, exception):
"""Test that celery can hit a maximum number of retries."""
# Doesn't really matter how many recipients, since we expect
# to fail on the first.
num_emails = 10
# We also send email to the instructor:
self._create_students(num_emails - 1)
expected_fails = num_emails
expected_succeeds = 0
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
# always fail to connect, triggering repeated retries until limit is hit:
get_conn.return_value.send_messages.side_effect = cycle([exception])
with patch('bulk_email.tasks.update_subtask_status', my_update_subtask_status):
self._test_run_with_task(
send_bulk_course_email,
'emailed',
num_emails,
expected_succeeds,
failed=expected_fails,
retried_withmax=(settings.BULK_EMAIL_MAX_RETRIES + 1)
)
def test_retry_after_smtp_disconnect(self):
self._test_retry_after_limited_retry_error(SMTPServerDisconnected(425, "Disconnecting"))
def test_max_retry_after_smtp_disconnect(self):
self._test_max_retry_limit_causes_failure(SMTPServerDisconnected(425, "Disconnecting"))
def test_retry_after_smtp_connect_error(self):
self._test_retry_after_limited_retry_error(SMTPConnectError(424, "Bad Connection"))
def test_max_retry_after_smtp_connect_error(self):
self._test_max_retry_limit_causes_failure(SMTPConnectError(424, "Bad Connection"))
def test_retry_after_aws_connect_error(self):
self._test_retry_after_limited_retry_error(AWSConnectionError("Unable to provide secure connection through proxy"))
def test_max_retry_after_aws_connect_error(self):
self._test_max_retry_limit_causes_failure(AWSConnectionError("Unable to provide secure connection through proxy"))
def test_retry_after_general_error(self):
self._test_retry_after_limited_retry_error(Exception("This is some random exception."))
def test_max_retry_after_general_error(self):
self._test_max_retry_limit_causes_failure(Exception("This is some random exception."))
def _test_retry_after_unlimited_retry_error(self, exception):
"""Test that celery handles throttling failures by retrying."""
num_emails = 8
# We also send email to the instructor:
self._create_students(num_emails - 1)
expected_fails = 0
expected_succeeds = num_emails
# Note that because celery in eager mode will call retries synchronously,
# each retry will increase the stack depth. It turns out that there is a
# maximum depth at which a RuntimeError is raised ("maximum recursion depth
# exceeded"). The maximum recursion depth is 90, so
# num_emails * expected_retries < 90.
expected_retries = 10
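# With the values used here (num_emails = 8, expected_retries = 10), that
# product is 8 * 10 = 80, safely under the limit of 90 quoted above.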
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
# Cycle through N throttling errors followed by a success.
get_conn.return_value.send_messages.side_effect = cycle(
chain(repeat(exception, expected_retries), [None])
)
self._test_run_with_task(
send_bulk_course_email,
'emailed',
num_emails,
expected_succeeds,
failed=expected_fails,
retried_nomax=(expected_retries * num_emails)
)
def test_retry_after_smtp_throttling_error(self):
self._test_retry_after_unlimited_retry_error(SMTPDataError(455, "Throttling: Sending rate exceeded"))
def test_retry_after_ses_throttling_error(self):
self._test_retry_after_unlimited_retry_error(SESMaxSendingRateExceededError(455, "Throttling: Sending rate exceeded"))
def _test_immediate_failure(self, exception):
"""Test that celery can hit a maximum number of retries."""
# Doesn't really matter how many recipients, since we expect
# to fail on the first.
num_emails = 10
# We also send email to the instructor:
self._create_students(num_emails - 1)
expected_fails = num_emails
expected_succeeds = 0
with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:
# always fail to send, causing an immediate failure with no retries:
get_conn.return_value.send_messages.side_effect = cycle([exception])
self._test_run_with_task(
send_bulk_course_email,
'emailed',
num_emails,
expected_succeeds,
failed=expected_fails,
)
def test_failure_on_unhandled_smtp(self):
self._test_immediate_failure(SMTPAuthenticationError(403, "That password doesn't work!"))
def test_failure_on_ses_quota_exceeded(self):
self._test_immediate_failure(SESDailyQuotaExceededError(403, "You're done for the day!"))
def test_failure_on_ses_address_not_verified(self):
self._test_immediate_failure(SESAddressNotVerifiedError(403, "Who *are* you?"))
def test_failure_on_ses_identity_not_verified(self):
self._test_immediate_failure(SESIdentityNotVerifiedError(403, "May I please see an ID!"))
def test_failure_on_ses_domain_not_confirmed(self):
self._test_immediate_failure(SESDomainNotConfirmedError(403, "You're out of bounds!"))
|
nttks/jenkins-test
|
lms/djangoapps/bulk_email/tests/test_tasks.py
|
Python
|
agpl-3.0
| 24,083
| 0.003737
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import ARM_BIN, ARM_INC, ARM_LIB, MY_ARM_CLIB, ARM_CPPLIB
from workspace_tools.hooks import hook_tool
from workspace_tools.settings import GOANNA_PATH
class ARM(mbedToolchain):
LINKER_EXT = '.sct'
LIBRARY_EXT = '.ar'
STD_LIB_NAME = "%s.ar"
DIAGNOSTIC_PATTERN = re.compile('"(?P<file>[^"]+)", line (?P<line>\d+)( \(column (?P<column>\d+)\)|): (?P<severity>Warning|Error): (?P<message>.+)')
DEP_PATTERN = re.compile('\S+:\s(?P<file>.+)\n')
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
mbedToolchain.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
if target.core == "Cortex-M0+":
cpu = "Cortex-M0"
elif target.core == "Cortex-M4F":
cpu = "Cortex-M4.fp"
elif target.core == "Cortex-M7F":
cpu = "Cortex-M7.fp.sp"
else:
cpu = target.core
main_cc = join(ARM_BIN, "armcc")
common = ["-c",
"--cpu=%s" % cpu, "--gnu",
"-Otime", "--split_sections", "--apcs=interwork",
"--brief_diagnostics", "--restrict", "--multibyte_chars"
]
if "save-asm" in self.options:
common.extend(["--asm", "--interleave"])
if "debug-info" in self.options:
common.append("-g")
common.append("-O0")
else:
common.append("-O3")
common_c = [
"--md", "--no_depend_system_headers",
'-I%s' % ARM_INC
]
self.asm = [main_cc] + common + ['-I%s' % ARM_INC]
if not "analyze" in self.options:
self.cc = [main_cc] + common + common_c + ["--c99"]
self.cppc = [main_cc] + common + common_c + ["--cpp", "--no_rtti"]
else:
self.cc = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "--dialect=armcc", '--output-format="%s"' % self.GOANNA_FORMAT] + common + common_c + ["--c99"]
self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cc.replace('\\', '/'), "--dialect=armcc", '--output-format="%s"' % self.GOANNA_FORMAT] + common + common_c + ["--cpp", "--no_rtti"]
self.ld = [join(ARM_BIN, "armlink")]
self.sys_libs = []
self.ar = join(ARM_BIN, "armar")
self.elf2bin = join(ARM_BIN, "fromelf")
def remove_option(self, option):
for tool in [self.asm, self.cc, self.cppc]:
if option in tool:
tool.remove(option)
def assemble(self, source, object, includes):
# Preprocess first, then assemble
tempfile = object + '.E.s'
return [
self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-E", "-o", tempfile, source],
self.hook.get_cmdline_assembler(self.asm + ["-o", object, tempfile])
]
def parse_dependencies(self, dep_path):
dependencies = []
for line in open(dep_path).readlines():
match = ARM.DEP_PATTERN.match(line)
if match is not None:
dependencies.append(match.group('file'))
return dependencies
def parse_output(self, output):
for line in output.splitlines():
match = ARM.DIAGNOSTIC_PATTERN.match(line)
if match is not None:
self.cc_info(
match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message'),
target_name=self.target.name,
toolchain_name=self.name
)
match = self.goanna_parse_line(line)
if match is not None:
self.cc_info(
match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message')
)
def get_dep_opt(self, dep_path):
return ["--depend", dep_path]
def archive(self, objects, lib_path):
self.default_cmd([self.ar, '-r', lib_path] + objects)
def link(self, output, objects, libraries, lib_dirs, mem_map):
if len(lib_dirs):
args = ["-o", output, "--userlibpath", ",".join(lib_dirs), "--info=totals", "--list=.link_totals.txt"]
else:
args = ["-o", output, "--info=totals", "--list=.link_totals.txt"]
if mem_map:
args.extend(["--scatter", mem_map])
if hasattr(self.target, "link_cmdline_hook"):
args = self.target.link_cmdline_hook(self.__class__.__name__, args)
self.default_cmd(self.ld + args + objects + libraries + self.sys_libs)
@hook_tool
def binary(self, resources, elf, bin):
args = [self.elf2bin, '--bin', '-o', bin, elf]
if hasattr(self.target, "binary_cmdline_hook"):
args = self.target.binary_cmdline_hook(self.__class__.__name__, args)
self.default_cmd(args)
class ARM_STD(ARM):
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
ARM.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
self.cc += ["-D__ASSERT_MSG"]
self.cppc += ["-D__ASSERT_MSG"]
self.ld.append("--libpath=%s" % ARM_LIB)
class ARM_MICRO(ARM):
PATCHED_LIBRARY = False
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
ARM.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
# Compiler
self.asm += ["-D__MICROLIB"]
self.cc += ["--library_type=microlib", "-D__MICROLIB", "-D__ASSERT_MSG"]
self.cppc += ["--library_type=microlib", "-D__MICROLIB", "-D__ASSERT_MSG"]
# Linker
self.ld.append("--library_type=microlib")
# We had to patch microlib to add C++ support
# In later releases this patch should have entered mainline
if ARM_MICRO.PATCHED_LIBRARY:
self.ld.append("--noscanlib")
# System Libraries
self.sys_libs.extend([join(MY_ARM_CLIB, lib+".l") for lib in ["mc_p", "mf_p", "m_ps"]])
if target.core == "Cortex-M3":
self.sys_libs.extend([join(ARM_CPPLIB, lib+".l") for lib in ["cpp_ws", "cpprt_w"]])
elif target.core in ["Cortex-M0", "Cortex-M0+"]:
self.sys_libs.extend([join(ARM_CPPLIB, lib+".l") for lib in ["cpp_ps", "cpprt_p"]])
else:
self.ld.append("--libpath=%s" % ARM_LIB)
|
nabilbendafi/mbed
|
workspace_tools/toolchains/arm.py
|
Python
|
apache-2.0
| 7,354
| 0.005167
|
from monitor import Monitor
try:
from python_libtorrent import get_libtorrent
lt = get_libtorrent()
except Exception, e:
import libtorrent as lt
class Dispatcher(Monitor):
def __init__(self, client):
super(Dispatcher,self).__init__(client)
def do_start(self, th, ses):
self._th = th
self._ses=ses
self.start()
def run(self):
if not self._ses:
raise Exception('Invalid state, session is not initialized')
while self.running:
a=self._ses.wait_for_alert(1000)
if a:
alerts= self._ses.pop_alerts()
for alert in alerts:
with self.lock:
for cb in self.listeners:
cb(lt.alert.what(alert), alert)
|
ChopChopKodi/pelisalacarta
|
python/main-classic/lib/btserver/dispatcher.py
|
Python
|
gpl-3.0
| 805
| 0.006211
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016, 2018, 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The WAF plugin is useful to build waf based parts
waf bases projects are projects that drive configuration and build via
a local waf python helper - see https://github.com/waf-project/waf for more
details.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
In addition, this plugin uses the following plugin-specific keywords:
- configflags:
(list of strings)
configure flags to pass to the build such as those shown by running
./waf --help
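For example (illustrative values only), setting configflags to
['--prefix=/usr'] makes the plugin run './waf configure --prefix=/usr'
during the build step.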
"""
from snapcraft.plugins.v1 import PluginV1
class WafPlugin(PluginV1):
"""plugin to build via waf build system"""
@classmethod
def schema(cls):
schema = super().schema()
schema["properties"]["configflags"] = {
"type": "array",
"minitems": 1,
"uniqueItems": True,
"items": {"type": "string"},
"default": [],
}
schema["required"] = ["source"]
return schema
def __init__(self, name, options, project):
super().__init__(name, options, project)
self._setup_base_tools()
def _setup_base_tools(self):
self.build_packages.append("python-dev:native")
@classmethod
def get_build_properties(cls):
# Inform Snapcraft of the properties associated with building. If these
# change in the YAML, Snapcraft will consider the build step dirty.
return ["configflags"]
def env(self, root):
env = super().env(root)
if self.project.is_cross_compiling:
env.extend(
[
"CC={}-gcc".format(self.project.arch_triplet),
"CXX={}-g++".format(self.project.arch_triplet),
]
)
return env
def enable_cross_compilation(self):
# Let snapcraft know that this plugin can cross-compile
# If the method isn't implemented an exception is raised
pass
def build(self):
super().build()
self.run(["./waf", "distclean"])
self.run(["./waf", "configure"] + self.options.configflags)
self.run(["./waf", "build"])
self.run(
["./waf", "install", "--destdir=" + self.installdir]
) # target from snappy env
|
chipaca/snapcraft
|
snapcraft/plugins/v1/waf.py
|
Python
|
gpl-3.0
| 3,034
| 0
|
from csv import DictWriter
from io import StringIO
import os
import unittest
from pathlib import Path
from unittest.mock import patch, Mock, DEFAULT
from pytest import fixture
from micall.core import remap
from micall.core.project_config import ProjectConfig
from micall.core.remap import is_first_read, is_short_read, \
MixedReferenceSplitter, write_remap_counts, convert_prelim, read_contigs
from micall.utils.externals import Bowtie2, Bowtie2Build
HXB2_NAME = "HIV1-B-FR-K03455-seed"
@fixture(name='projects', scope="session")
def load_projects():
yield ProjectConfig.loadDefault()
class IsShortReadTest(unittest.TestCase):
def assertCigarIsPrimer(self, cigar, is_primer_expected):
row = {'cigar': cigar}
max_primer_length = 29
self.assertEqual(is_primer_expected, is_short_read(row, max_primer_length))
def testIsPrimerForLongRead(self):
self.assertCigarIsPrimer('45M', False)
def testIsPrimerForShortRead(self):
self.assertCigarIsPrimer('10M', True)
def testIsPrimerForShortReadWithClipping(self):
self.assertCigarIsPrimer('45S10M', True)
def testIsPrimerForReadWithMultipleMatches(self):
self.assertCigarIsPrimer('10M3D45M', False)
class IsFirstReadTest(unittest.TestCase):
def testFirstRead(self):
flag = '99'
is_first_expected = True
is_first = is_first_read(flag)
self.assertEqual(is_first_expected, is_first)
def testSecondRead(self):
flag = '147'
is_first_expected = False
is_first = is_first_read(flag)
self.assertEqual(is_first_expected, is_first)
def testSmallFlag(self):
flag = '3'
is_first_expected = False
is_first = is_first_read(flag)
self.assertEqual(is_first_expected, is_first)
class SamToConseqsTest(unittest.TestCase):
def testSimple(self):
# SAM:qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t12M\t=\t1\t12\tACAAGACCCAAC\tJJJJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'ACAAGACCCAAC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testOffset(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t147\ttest\t4\t44\t12M\t=\t3\t-12\tACAAGACCCAAC\tJJJJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'NNNACAAGACCCAAC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testHeaders(self):
sam_file = StringIO(
"@SH\tsome header\n"
"@MHI\tmost headers are ignored, except SQ for sequence reference\n"
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACA\tJJJ\n"
)
expected_conseqs = {'test': 'ACA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testUnknownReferenceName(self):
sam_file = StringIO(
"@SQ\tSN:testX\n"
"test1\t99\ttestY\t1\t44\t12M\t=\t1\t3\tACA\tJJJ\n"
)
expected_conseqs = {}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testHeaderFields(self):
sam_file = StringIO(
"@SQ\tOF:other field: ignored\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACA\tJJJ\n"
)
expected_conseqs = {'test': 'ACA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testExtraFields(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACA\tJJJ\tAS:i:236\tNM:i:12\n"
)
expected_conseqs = {'test': 'ACA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testMaxConsensus(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACA\tJJJ\n"
"test2\t147\ttest\t1\t44\t3M\t=\t1\t-3\tACA\tJJJ\n"
"test3\t99\ttest\t1\t44\t3M\t=\t1\t3\tTCA\tJJJ\n"
)
expected_conseqs = {'test': 'ACA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testTie(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tGCA\tJJJ\n"
"test2\t147\ttest\t1\t44\t3M\t=\t1\t-3\tTCA\tJJJ\n"
)
expected_conseqs = {'test': 'GCA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testSoftClip(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3S5M1S\t=\t1\t9\tACAGGGAGA\tJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'GGGAG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testSimpleInsertion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I3M\t=\t1\t9\tACAGGGAGA\tJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'ACAGGGAGA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testLowQualityInsertion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I3M\t=\t1\t9\tACAGGGAGA\tJJJJ/JJJJ\n"
)
expected_conseqs = {'test': 'ACAAGA'}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testInsertionAfterLowQuality(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I3M\t=\t1\t9\tACAGGGAGA\tJJ/JJJJJJ\n"
)
expected_conseqs = {'test': 'ACNAGA'}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testInsertionAndOffset(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I3M\t=\t1\t9\tACAGGGAGA\tJJJJJJJJJJJJ\n"
"test2\t99\ttest\t5\t44\t5M\t=\t1\t5\tGACCC\tJJJJJ\n"
)
expected_conseqs = {'test': 'ACAGGGAGACCC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testComplexInsertion(self):
# Insertions are ignored if not a multiple of three
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M1I3M2I6M\t=\t1\t12\tACAGAGAGGCCCAAC\tJJJJJJJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'ACAAGACCCAAC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testDeletion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
)
expected_conseqs = {'test': 'ACAGGG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testDeletionInSomeReads(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
"test2\t99\ttest\t1\t44\t3M3D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
"test3\t99\ttest\t1\t44\t9M\t=\t3\t9\tACATTTGGG\tJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'ACATTTGGG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testDeletionWithFrameShift(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M1D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
)
expected_conseqs = {'test': 'ACA-GGG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testBigDeletionWithFrameShift(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M4D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
)
expected_conseqs = {'test': 'ACA----GGG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testOverlapsCountOnce(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACG\tJJJ\n"
"test1\t147\ttest\t1\t44\t3M\t=\t1\t-3\tACG\tJJJ\n"
"test2\t99\ttest\t1\t44\t3M\t=\t1\t3\tACG\tJJJ\n"
"test2\t147\ttest\t1\t44\t3M\t=\t1\t-3\tACG\tJJJ\n"
"test3\t99\ttest\t1\t44\t3M\t=\t3\t3\tATG\tJJJ\n"
"test3\t147\ttest\t3\t44\t3M\t=\t1\t-3\tGCC\tJJJ\n"
"test4\t99\ttest\t1\t44\t3M\t=\t3\t3\tATG\tJJJ\n"
"test4\t147\ttest\t3\t44\t3M\t=\t1\t-3\tGCC\tJJJ\n"
"test5\t99\ttest\t1\t44\t3M\t=\t3\t3\tATG\tJJJ\n"
"test5\t147\ttest\t3\t44\t3M\t=\t1\t-3\tGCC\tJJJ\n"
)
expected_conseqs = {'test': 'ATGCC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testReverseLeftOfForward(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t2\t44\t1M\t=\t1\t1\tC\tJ\n"
"test1\t147\ttest\t1\t44\t1M\t=\t2\t-1\tA\tJ\n"
)
expected_conseqs = {'test': 'AC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testPairMapsToTwoReferences(self):
sam_file = StringIO(
"@SQ\tSN:testX\n"
"@SQ\tSN:testY\n"
"test1\t99\ttestX\t1\t44\t3M\t=\t1\t3\tACG\tJJJ\n"
"test1\t147\ttestY\t1\t44\t3M\t=\t1\t-3\tACG\tJJJ\n"
)
expected_conseqs = {}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testLowQuality(self):
# Note that we ignore the overlapped portion of the reverse read,
# even if it has higher quality.
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACG\tJ/J\n"
)
expected_conseqs = {'test': 'ANG'}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testLowQualityAtEnd(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACG\tJJ/\n"
)
expected_conseqs = {'test': 'ACN'}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testLowQualityForward(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t3\t3\tATA\tJJA\n"
"test1\t147\ttest\t3\t44\t3M\t=\t1\t-3\tGCC\tJJJ\n"
)
expected_conseqs = {'test': 'ATGCC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testAllLowQuality(self):
# SAM:qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t147\ttest\t1\t24\t1M\t=\t1\t-1\tT\t#\n"
)
expected_conseqs = {}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testBadPairFlag(self):
""" Even if the pair isn't concordant, still include in consensus.
SAM flag 145 does not have bit 2 for properly aligned.
"""
# SAM:qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t145\ttest\t1\t24\t1M\t=\t1\t-1\tT\tF\n"
)
expected_conseqs = {'test': 'T'}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertEqual(expected_conseqs, conseqs)
def testUnmappedFlag(self):
""" If the read is unmapped, don't include in consensus.
SAM flag 149 has bit 4 for unmapped. Region is irrelevant.
"""
# SAM:qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t149\ttest\t1\t24\t1M\t=\t1\t-1\tT\tF\n"
)
expected_conseqs = {}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertEqual(expected_conseqs, conseqs)
def testLowQualityAndDeletion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
"test2\t99\ttest\t1\t44\t9M\t=\t3\t9\tACATTTGGG\tJJJ///JJJ\n"
)
expected_conseqs = {'test': 'ACANNNGGG'}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testSeeds(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t4\t44\t3M\t=\t10\t3\tTAT\tJJJ\n"
"test2\t99\ttest\t10\t44\t3M\t=\t4\t-3\tCAC\tJJJ\n"
)
seeds = {'test': 'ACATTTGGGCAC'}
expected_conseqs = {'test': 'ACATATGGGCAC'}
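# Positions with no read coverage (1-3 and 7-9) fall back to the seed
# sequence, while covered positions take the read bases (TTT -> TAT at 4-6).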
conseqs = remap.sam_to_conseqs(sam_file, seeds=seeds)
self.assertDictEqual(expected_conseqs, conseqs)
def testSeedsNeedSomeReads(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t4\t44\t3M\t=\t10\t3\tTAT\tJJJ\n"
)
seeds = {'test': 'ACATTTGGGCAC',
'other': 'TATGCACCC'}
expected_conseqs = {'test': 'ACATATGGGCAC'}
conseqs = remap.sam_to_conseqs(sam_file, seeds=seeds)
self.assertDictEqual(expected_conseqs, conseqs)
def testSeedsWithLowQuality(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t4\t44\t3M\t=\t10\t3\tTAT\tJJ/\n"
)
seeds = {'test': 'ACATTTGGGCAC'}
expected_conseqs = {'test': 'ACATATGGGCAC'}
conseqs = remap.sam_to_conseqs(sam_file,
seeds=seeds,
quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testSeedsWithPartialDeletion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t4\t44\t1M1D1M\t=\t10\t3\tTT\tJJ\n"
)
seeds = {'test': 'ACATTTGGGCAC'}
expected_conseqs = {'test': 'ACATTTGGGCAC'}
conseqs = remap.sam_to_conseqs(sam_file,
seeds=seeds,
quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testSeedsWithCodonDeletion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3D3M\t=\t10\t6\tACAGGG\tJJJJJJ\n"
)
seeds = {'test': 'ACATTTGGGCAC'}
expected_conseqs = {'test': 'ACATTTGGGCAC'}
conseqs = remap.sam_to_conseqs(sam_file,
seeds=seeds,
quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testDebugReports(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I9M\t=\t1\t12\tACTGGGAGACCCAAC\tJIJJJJJJJJJJJJJ\n"
"test1\t147\ttest\t1\t44\t3M3I9M\t=\t1\t-12\tACTGGGAGACCCAAC\tJIJJJJJJJJJJJJJ\n"
"test1\t99\ttest\t1\t44\t3M3I9M\t=\t1\t12\tATTGGGAGACCCAAC\tJHJJJJJJJJJJJJJ\n"
"test1\t147\ttest\t1\t44\t3M3I9M\t=\t1\t-12\tATTGGGAGACCCAAC\tJHJJJJJJJJJJJJJ\n"
)
reports = {('test', 2): None}
expected_reports = {('test', 2): 'H{C: 1, T: 1}, I{C: 1}'}
remap.sam_to_conseqs(sam_file, debug_reports=reports)
self.assertDictEqual(expected_reports, reports)
def testDebugReportsOnReverseRead(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I2M\t=\t1\t8\tACTGGGAG\tJJJJJJJJ\n"
"test1\t147\ttest\t5\t44\t8M\t=\t1\t-8\tGACCCAAC\tJJJJJIJJ\n"
"test1\t99\ttest\t1\t44\t3M3I2M\t=\t1\t12\tATTGGGAG\tJJJJJJJJ\n"
"test1\t147\ttest\t5\t44\t8M\t=\t1\t-12\tGACCCAAC\tJJJJJHJJ\n"
)
reports = {('test', 10): None}
expected_reports = {('test', 10): 'H{A: 2}, I{A: 1}'}
remap.sam_to_conseqs(sam_file, debug_reports=reports)
self.assertDictEqual(expected_reports, reports)
def test_drop_drifters_seeds_converged():
relevant_conseqs = dict(test='ATGAGGAGTA',
other='ATGACCAGTA',
wayoff='ATGAGGGTAC')
original_seeds = dict(test='ATGAAGTA',
other='AAGCCGAA',
wayoff='TCATGTAC')
read_counts = dict(test=1, other=1, wayoff=1)
distance_report = {}
expected_seed_names = {'test'}
expected_distances = dict(test=dict(seed_dist=2,
other_dist=5,
other_seed='other'),
other=dict(seed_dist=4,
other_dist=2,
other_seed='test'),
wayoff=dict(seed_dist=4,
other_dist=3,
other_seed='test'))
remap.drop_drifters(relevant_conseqs,
original_seeds,
distance_report,
read_counts)
assert distance_report == expected_distances
assert set(relevant_conseqs) == expected_seed_names
def test_drop_drifters_seeds_converged_with_different_alignment():
""" Seeds have similar regions, but at different positions.
"""
relevant_conseqs = dict(test='ATGAGGAGTA',
other='ATGACCAGTA')
original_seeds = dict(test='ATGAAGTA',
other='TCTCTCTCTCAAGCCGAA')
read_counts = dict(test=1, other=1)
distance_report = {}
expected_seed_names = {'test'}
remap.drop_drifters(relevant_conseqs,
original_seeds,
distance_report,
read_counts)
assert set(relevant_conseqs) == expected_seed_names
def test_sam_to_conseqs_seeds_converged_with_different_alignment_and_gap():
""" Gaps between areas with coverage.
"""
sam_file = StringIO(
f"@SQ\tSN:test\tSN:other\n"
f"test1\t99\ttest\t1\t44\t10M\t=\t1\t10\tATGAGGAGTA\tJJJJJJJJJJJJ\n"
f"other1\t99\tother\t11\t44\t5M\t=\t1\t5\tATGAC\tJJJJJJJ\n"
f"other2\t99\tother\t26\t44\t5M\t=\t1\t5\tCAGTA\tJJJJJJJ\n"
)
seeds = {'test': 'ATGAAGTA',
'other': 'TCTCTCTCTCAAGCTATATATATACGAA'}
expected_conseqs = {'test': 'ATGAGGAGTA'}
conseqs = remap.sam_to_conseqs(sam_file,
seeds=seeds,
original_seeds=seeds,
is_filtered=True)
assert conseqs == expected_conseqs
def test_drop_drifters_seeds_converged_with_confusing_gap():
""" Reads match other seed well, but with a big gap.
"""
# vvvvvvvv
relevant_conseqs = dict(test='ATGTCGTA',
# |||||||||
other='AAGCTATAT')
original_seeds = dict(
# vvv??vvv
test='ATGAAGTA',
# vvvvv ||||||||| vvv
other='ATGTCTCTCTCTCAAGCTATATATATACGAAGTA')
read_counts = dict(test=1, other=1)
distance_report = {}
expected_seed_names = {'test', 'other'}
remap.drop_drifters(relevant_conseqs,
original_seeds,
distance_report,
read_counts)
assert set(relevant_conseqs) == expected_seed_names
def test_drop_drifters_seeds_converged_plus_other_low_coverage():
""" Portion with decent coverage has converged, other hasn't.
"""
relevant_conseqs = dict(test='ATGAGGAGTA', other='ATGACCAGTA')
original_seeds = dict(test='ATGAAGTACTCTCT', other='AAGCCGAAGTGTGT')
read_counts = dict(test=2, other=3)
distance_report = {}
expected_seed_names = {'test'}
remap.drop_drifters(relevant_conseqs,
original_seeds,
distance_report,
read_counts)
assert set(relevant_conseqs) == expected_seed_names
def test_drop_drifters_seeds_both_converged(projects):
""" Both references are now closer to the other seed than the start.
Don't drop both. Keep test because it has more reads.
"""
hxb2_end = projects.getReference(HXB2_NAME)[-200:]
relevant_conseqs = dict(test='AAGCCGTA' + hxb2_end,
# ^ ^^
other='ATGAAGTA' + hxb2_end,
# ^ ^^ ^
unrelated='GGGTTTGGG' + hxb2_end)
original_seeds = dict(test='ATGAAGTA' + hxb2_end,
other='AAGCCGAA' + hxb2_end,
unrelated='GGGTTTGGG' + hxb2_end)
read_counts = dict(test=2, other=1, unrelated=1)
distance_report = {}
expected_seed_names = {'test', 'unrelated'}
expected_distances = dict(test=dict(seed_dist=3,
other_dist=1,
other_seed='other'),
other=dict(seed_dist=4,
other_dist=0,
other_seed='test'),
unrelated=dict(seed_dist=0,
other_dist=7,
other_seed='other'))
remap.drop_drifters(relevant_conseqs,
original_seeds,
distance_report,
read_counts)
assert distance_report == expected_distances
assert set(relevant_conseqs) == expected_seed_names
def test_sam_to_conseqs_all_seeds_low_coverage():
""" Multiple seeds mapped, but none have good coverage.
Choose the seed with the most reads.
"""
sam_file = StringIO(
"@SQ\tSN:test\tSN:other\n"
"test1\t99\ttest\t1\t44\t10M\t=\t1\t10\tATGAGGAGTA\tJJJJJJJJJJJJ\n"
"test2\t99\ttest\t11\t44\t6M\t=\t1\t10\tCTCTCT\tJJJJJJ\n"
"other1\t99\tother\t1\t44\t10M\t=\t1\t10\tATGACCAGTA\tJJJJJJJJJJJJ\n"
)
seeds = {'test': 'ATGAAGTACTCTCT',
'other': 'AAGCCGAAGTGTGT'}
expected_conseqs = {'test': 'ATGAGGAGTACTCTCT'}
conseqs = remap.sam_to_conseqs(sam_file,
seeds=seeds,
original_seeds=seeds,
is_filtered=True,
filter_coverage=2)
assert conseqs == expected_conseqs
def test_extract_relevant_seeds():
expectations = [ # (aligned_conseq, aligned_seed, expected_seed)
('ACTG',
'ATTG',
'ATTG'),
('-ACTG-',
'CATTGT',
'ATTG'),
('-AC-TG--',
'CATATGT',
'ATATG'),
('-AC-TG-AT-',
'CATATGTATC',
'ATATGTAT'),
('--T--',
'CATAT',
'T'),
('TACG----',
'----GGCC',
'')]
for aligned_conseq, aligned_seed, expected_seed in expectations:
relevant = remap.extract_relevant_seed(aligned_conseq, aligned_seed)
assert (aligned_conseq,
aligned_seed,
relevant) == (aligned_conseq, aligned_seed, expected_seed)
def test_sam_to_conseqs_nothing_mapped():
sam_file = StringIO(
"@SQ\tSN:test\tSN:other\n"
)
seeds = {'test': 'ATGAAGTACTCTCT',
'other': 'AAGCCGAAGTGTGT'}
expected_conseqs = {}
conseqs = remap.sam_to_conseqs(sam_file,
seeds=seeds,
original_seeds=seeds,
is_filtered=True,
filter_coverage=2)
assert conseqs == expected_conseqs
class MixedReferenceMemorySplitter(MixedReferenceSplitter):
""" Dummy class to hold split reads in memory. Useful for testing. """
def __init__(self, work_path):
super().__init__(work_path)
self.is_closed = True
def create_split_file(self, refname, direction):
self.is_closed = False
return StringIO()
def close_split_file(self, split_file):
self.is_closed = True
# noinspection DuplicatedCode
class MixedReferenceSplitterTest(unittest.TestCase):
def setUp(self):
super(MixedReferenceSplitterTest, self).setUp()
self.addTypeEqualityFunc(str, self.assertMultiLineEqual)
self.work_path = os.path.dirname(__file__)
def testSimple(self):
sam_file = StringIO(
"@SQ\tSN:r\n"
"r1\t99\tr\t1\t44\t3M\t=\t1\t3\tACG\tJJJ\n"
"r1\t147\tr\t1\t44\t3M\t=\t1\t-3\tACG\tJJJ\n")
expected_rows = [
["r1", "99", "r", "1", "44", "3M", "=", "1", "3", "ACG", "JJJ"],
["r1", "147", "r", "1", "44", "3M", "=", "1", "-3", "ACG", "JJJ"]]
splitter = MixedReferenceSplitter(self.work_path)
rows = list(splitter.split(sam_file))
self.assertEqual(expected_rows, rows)
def testTrimOptionalFields(self):
sam_file = StringIO(
"@SQ\tSN:r\n"
"r1\t99\tr\t1\t44\t3M\t=\t1\t3\tACG\tJJJ\tAS:i:100\n"
"r1\t147\tr\t1\t44\t3M\t=\t1\t-3\tACG\tJJJ\tYS:Z:UP\n")
expected_rows = [
["r1", "99", "r", "1", "44", "3M", "=", "1", "3", "ACG", "JJJ"],
["r1", "147", "r", "1", "44", "3M", "=", "1", "-3", "ACG", "JJJ"]]
splitter = MixedReferenceSplitter(self.work_path)
rows = list(splitter.split(sam_file))
self.assertEqual(expected_rows, rows)
def testUnmapped(self):
sam_file = StringIO(
"@SQ\tSN:r\n"
"r1\t107\tr\t1\t44\t3M\t*\t1\t3\tACG\tJJJ\n"
"r1\t149\t*\t*\t*\t*\tr\t*\t*\tACG\tJJJ\n")
expected_rows = []
splitter = MixedReferenceSplitter(self.work_path)
rows = list(splitter.split(sam_file))
self.assertEqual(expected_rows, rows)
def testSplit(self):
""" If a pair is split over two references, choose one.
Use the reference that gave the higher mapq score.
"""
sam_file = StringIO(
"@SQ\tSN:r\n"
"r1\t99\tRX\t1\t44\t3M\t=\t1\t3\tACG\tJJJ\tAS:i:100\n"
"r1\t147\tRX\t1\t44\t3M\t=\t1\t-3\tACG\tJJJ\tYS:Z:UP\n"
"r2\t99\tRX\t1\t44\t3M\tRY\t1\t3\tACG\tJJJ\tAS:i:100\n"
"r2\t147\tRY\t1\t11\t3M\tRX\t1\t-3\tACC\tJJK\tAS:i:200\n")
expected_rows = [
["r1", "99", "RX", "1", "44", "3M", "=", "1", "3", "ACG", "JJJ"],
["r1", "147", "RX", "1", "44", "3M", "=", "1", "-3", "ACG", "JJJ"]]
expected_fastq1 = """\
@r2
ACG
+
JJJ
"""
expected_fastq2 = """\
@r2
GGT
+
KJJ
"""
splitter = MixedReferenceMemorySplitter(self.work_path)
rows = list(splitter.split(sam_file))
is_closed = splitter.is_closed
splits = splitter.splits
self.assertEqual(expected_rows, rows)
self.assertEqual(['RX'], list(splits.keys()))
fastq1, fastq2 = splits['RX']
self.assertEqual(expected_fastq1, fastq1.getvalue())
self.assertEqual(expected_fastq2, fastq2.getvalue())
self.assertTrue(is_closed)
def testTiedMapQ(self):
""" If both mates have the same mapq, choose higher alignment score.
Use the reference that gave the higher alignment score.
"""
sam_file = StringIO(
"@SQ\tSN:r\n"
"r1\t99\tRX\t1\t44\t3M\t=\t1\t3\tACG\tJJJ\tAS:i:100\n"
"r1\t147\tRX\t1\t44\t3M\t=\t1\t-3\tACG\tJJJ\tYS:Z:UP\n"
"r2\t99\tRX\t1\t11\t3M\tRY\t1\t3\tACG\tJJJ\tAS:i:100\n"
"r2\t147\tRY\t1\t11\t3M\tRX\t1\t-3\tACC\tJJK\tAS:i:200\n")
splitter = MixedReferenceMemorySplitter(self.work_path)
list(splitter.split(sam_file))
splits = splitter.splits
self.assertEqual(['RY'], list(splits.keys()))
def testWalk(self):
sam_file = StringIO(
"@SQ\tSN:r\n"
"r1\t99\tRX\t1\t44\t3M\t=\t1\t3\tACG\tJJJ\tAS:i:100\n"
"r1\t147\tRX\t1\t44\t3M\t=\t1\t-3\tACG\tJJJ\tYS:Z:UP\n"
"r2\t99\tRX\t1\t44\t3M\tRY\t1\t3\tACG\tJJJ\tAS:i:100\n"
"r2\t147\tRY\t1\t44\t3M\tRX\t1\t-3\tACT\tKKK\tAS:i:200\n")
expected_rows = [
["r1", "99", "RX", "1", "44", "3M", "=", "1", "3", "ACG", "JJJ", "AS:i:100"],
["r1", "147", "RX", "1", "44", "3M", "=", "1", "-3", "ACG", "JJJ", "YS:Z:UP"],
["r2", "99", "RX", "1", "44", "3M", "RY", "1", "3", "ACG", "JJJ", "AS:i:100"],
["r2", "147", "RY", "1", "44", "3M", "RX", "1", "-3", "ACT", "KKK", "AS:i:200"]]
splitter = MixedReferenceMemorySplitter(self.work_path)
rows = list(splitter.walk(sam_file))
splits = splitter.splits
self.assertEqual(expected_rows, rows)
self.assertEqual({}, splits)
class RemapCountsTest(unittest.TestCase):
def test_simple(self):
report = StringIO()
writer = DictWriter(
report,
['type', 'count', 'seed_dist', 'other_dist', 'other_seed'],
lineterminator=os.linesep)
counts = {'r1': 100, 'r2': 200}
expected_report = """\
prelim r1,100,,,
prelim r2,200,,,
"""
write_remap_counts(writer, counts, 'prelim')
self.assertEqual(expected_report, report.getvalue())
def test_distance(self):
report = StringIO()
writer = DictWriter(
report,
['type', 'count', 'seed_dist', 'other_dist', 'other_seed'],
lineterminator=os.linesep)
counts = {'r1': 100, 'r2': 200}
distance_report = {'r1': {'seed_dist': 1,
'other_dist': 10,
'other_seed': 'r2'},
'r2': {'seed_dist': 2,
'other_dist': 20,
'other_seed': 'r1'}}
expected_report = """\
remap r1,100,1,10,r2
remap r2,200,2,20,r1
"""
write_remap_counts(writer, counts, 'remap', distance_report)
self.assertEqual(expected_report, report.getvalue())
# noinspection DuplicatedCode
class ConvertPrelimTest(unittest.TestCase):
def setUp(self):
self.projects = ProjectConfig()
self.projects.load(StringIO("""\
{
"regions": {
"R1-seed": {
"seed_group": "main",
"reference": ["ACTAAAGGG"]
},
"R2-seed": {
"seed_group": "main",
"reference": ["ACTAAAGGGAAA"]
}
}
}
"""))
self.sam_file = StringIO()
self.remap_counts = StringIO()
self.remap_counts_writer = DictWriter(
self.remap_counts,
['type', 'filtered_count', 'count'],
lineterminator=os.linesep)
self.remap_counts_writer.writeheader()
def test_simple(self):
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,9M,=,1,0,AAACCCTTT,BBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t9M\t=\t1\t0\tAAACCCTTT\tBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim R1-seed,0,1
"""
expected_seed_counts = {}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_two_regions(self):
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,9M,=,1,0,AAACCCTTT,BBBBBBBBB
example2,89,R2-seed,1,0,9M,=,1,0,AAAACCTTT,BBBBBBBBB
example3,89,R2-seed,1,0,9M,=,1,0,AAAAACTTT,BBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t9M\t=\t1\t0\tAAACCCTTT\tBBBBBBBBB
example2\t89\tR2-seed\t1\t0\t9M\t=\t1\t0\tAAAACCTTT\tBBBBBBBBB
example3\t89\tR2-seed\t1\t0\t9M\t=\t1\t0\tAAAAACTTT\tBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim R1-seed,0,1
prelim R2-seed,0,2
"""
expected_seed_counts = {}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_long_reads(self):
self.maxDiff = None
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,54M,=,1,0,\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2,89,R1-seed,1,0,54M,=,1,0,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim R1-seed,2,2
"""
expected_seed_counts = {'R1-seed': 2}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_star_region(self):
self.maxDiff = None
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,54M,=,1,0,\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2,89,R1-seed,1,0,54M,=,1,0,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example3,93,*,*,*,*,*,*,*,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example3\t93\t*\t*\t*\t*\t*\t*\t*\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim *,0,1
prelim R1-seed,2,2
"""
expected_seed_counts = {'R1-seed': 2}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_best_in_group(self):
self.maxDiff = None
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,54M,=,1,0,\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2,89,R2-seed,1,0,54M,=,1,0,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example3,89,R1-seed,1,0,54M,=,1,0,\
AAAAAATTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example4,89,R2-seed,1,0,54M,=,1,0,\
AAAAAAAATAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example5,89,R2-seed,1,0,54M,=,1,0,\
AAAAAAAAAAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2\t89\tR2-seed\t1\t0\t54M\t=\t1\t0\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example3\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAAAAATTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example4\t89\tR2-seed\t1\t0\t54M\t=\t1\t0\t\
AAAAAAAATAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example5\t89\tR2-seed\t1\t0\t54M\t=\t1\t0\t\
AAAAAAAAAAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim R1-seed,2,2
prelim R2-seed,3,3
"""
expected_seed_counts = {'R2-seed': 3}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
def test_unmapped_read(self):
self.maxDiff = None
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
example1,89,R1-seed,1,0,54M,=,1,0,\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2,93,R1-seed,1,0,54M,=,1,0,\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT,\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
""")
count_threshold = 2
expected_sam_file = """\
@HD VN:1.0 SO:unsorted
@SQ SN:R1-seed LN:9
@SQ SN:R2-seed LN:12
@PG ID:bowtie2 PN:bowtie2 VN:2.2.3 CL:""
example1\t89\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTTAAACCCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
example2\t93\tR1-seed\t1\t0\t54M\t=\t1\t0\t\
AAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTTAAAACCTTT\t\
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
"""
expected_remap_counts = """\
type,filtered_count,count
prelim *,0,1
prelim R1-seed,1,1
"""
expected_seed_counts = {}
seed_counts = convert_prelim(prelim_csv,
self.sam_file,
self.remap_counts_writer,
count_threshold,
self.projects)
self.assertEqual(expected_sam_file, self.sam_file.getvalue())
self.assertEqual(expected_remap_counts, self.remap_counts.getvalue())
self.assertEqual(expected_seed_counts, seed_counts)
class RemapTest(unittest.TestCase):
def setUp(self):
patcher = patch.multiple(Bowtie2, __init__=Mock(return_value=None), yield_output=DEFAULT)
self.bowtie2_output = []
mocks = patcher.start()
mocks['yield_output'].return_value = self.bowtie2_output
self.addCleanup(patcher.stop)
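# Each test drives the mapping results by appending fake bowtie2 SAM lines
# to self.bowtie2_output; the patched yield_output hands back that same list.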
patcher = patch.multiple(Bowtie2Build,
__init__=Mock(return_value=None),
build=DEFAULT)
patcher.start()
self.addCleanup(patcher.stop)
mock_refs = {'R1': 'GTGGG',
'R2': 'ACAAA'}
patcher = patch.object(ProjectConfig, 'loadDefault')
mock_projects = patcher.start()
self.addCleanup(patcher.stop)
mock_projects.return_value.getAllReferences.return_value = mock_refs
mock_projects.return_value.getReference.side_effect = mock_refs.__getitem__
patcher = patch('micall.core.remap.is_short_read', Mock(return_value=False))
patcher.start()
self.addCleanup(patcher.stop)
def test_good_contig(self):
contigs_csv = StringIO("""\
ref,match,group_ref,contig
R1,1.0,R1,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
""")
self.bowtie2_output.extend([
"read1\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read1\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read2\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read2\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read3\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read3\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read4\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read4\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read5\t77\t*\t0\t0\t*\t*\t0\t0\tGTAAA\tAAAAA\n",
"read5\t141\t*\t0\t0\t*\t*\t0\t0\tGTAAA\tAAAAA\n"])
expected_remap_counts_csv = """\
type,count,filtered_count,seed_dist,other_dist,other_seed
raw,20,,,,
remap 1-R1,8,,,,
remap-final 1-R1,8,,,,
unmapped,2,,,,
"""
expected_remap_csv = """\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
read1,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read1,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read2,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read2,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read3,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read3,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read4,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read4,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
"""
self.assertMapsToContigs(contigs_csv, expected_remap_csv, expected_remap_counts_csv)
def assertMapsToContigs(self, contigs_csv, expected_remap_csv, expected_remap_counts_csv):
test_path = os.path.dirname(__file__)
remap_counts_csv = StringIO()
remap_csv = StringIO()
remap.map_to_contigs(
os.path.join(test_path,
'microtest',
'1234A-V3LOOP_S1_L001_R1_001.fastq'),
os.path.join(test_path,
'microtest',
'1234A-V3LOOP_S1_L001_R2_001.fastq'),
contigs_csv,
remap_csv,
remap_counts_csv,
StringIO(),
StringIO(),
StringIO(),
work_path=os.path.join(test_path, 'working'))
self.assertEqual(expected_remap_counts_csv, remap_counts_csv.getvalue())
self.assertEqual(expected_remap_csv, remap_csv.getvalue())
def test_bad_contig(self):
contigs_csv = StringIO("""\
ref,match,group_ref,contig
R1,1.0,R1,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
R2,0.2,R2,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
""")
self.bowtie2_output.extend([
"read1\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read1\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read2\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read2\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read3\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read3\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read4\t99\t2-R2-partial\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read4\t147\t2-R2-partial\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read5\t77\t*\t0\t0\t*\t*\t0\t0\tGTAAA\tAAAAA\n",
"read5\t141\t*\t0\t0\t*\t*\t0\t0\tGTAAA\tAAAAA\n"])
expected_remap_counts_csv = """\
type,count,filtered_count,seed_dist,other_dist,other_seed
raw,20,,,,
remap 1-R1,6,,,,
remap 2-R2-partial,2,,,,
remap-final 1-R1,6,,,,
remap-final 2-R2-partial,2,,,,
unmapped,2,,,,
"""
expected_remap_csv = """\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
read1,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read1,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read2,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read2,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read3,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read3,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read4,99,2-R2-partial,1,44,5M,=,1,-81,GTGGG,AAAAA
read4,147,2-R2-partial,1,44,5M,=,1,-81,GTGGG,AAAAA
"""
self.assertMapsToContigs(contigs_csv, expected_remap_csv, expected_remap_counts_csv)
def test_excluded_contig(self):
test_path = os.path.dirname(__file__)
contigs_csv = StringIO("""\
ref,match,group_ref,contig
R1,1.0,R1,GTGGG
R2,1.0,R2,ACAAA
""")
self.bowtie2_output.extend([
"read1\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read1\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read2\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read2\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read3\t99\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read3\t147\t1-R1\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read4\t99\t2-R2-excluded\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read4\t147\t2-R2-excluded\t1\t44\t5M\t=\t1\t-81\tGTGGG\tAAAAA\n",
"read5\t77\t*\t0\t0\t*\t*\t0\t0\tGTAAA\tAAAAA\n",
"read5\t141\t*\t0\t0\t*\t*\t0\t0\tGTAAA\tAAAAA\n"])
excluded_seeds = ['R2']
expected_remap_counts_csv = """\
type,count,filtered_count,seed_dist,other_dist,other_seed
raw,20,,,,
remap 1-R1,6,,,,
remap 2-R2-excluded,2,,,,
remap-final 1-R1,6,,,,
remap-final 2-R2-excluded,2,,,,
unmapped,2,,,,
"""
expected_remap_csv = """\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
read1,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read1,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read2,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read2,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read3,99,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
read3,147,1-R1,1,44,5M,=,1,-81,GTGGG,AAAAA
"""
expected_remap_conseq_csv = """\
region,sequence
1-R1,GTGGG
"""
remap_counts_csv = StringIO()
remap_csv = StringIO()
remap_conseq_csv = StringIO()
remap.map_to_contigs(
os.path.join(test_path,
'microtest',
'1234A-V3LOOP_S1_L001_R1_001.fastq'),
os.path.join(test_path,
'microtest',
'1234A-V3LOOP_S1_L001_R2_001.fastq'),
contigs_csv,
remap_csv,
remap_counts_csv,
remap_conseq_csv,
StringIO(),
StringIO(),
work_path=os.path.join(test_path, 'working'),
excluded_seeds=excluded_seeds)
self.assertEqual(expected_remap_counts_csv, remap_counts_csv.getvalue())
self.assertEqual(expected_remap_csv, remap_csv.getvalue())
self.assertEqual(expected_remap_conseq_csv, remap_conseq_csv.getvalue())
def test_read_contigs(projects):
contigs_csv = StringIO("""\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-2b,1.0,HCV-2b,GCCCGCCCCCTGATGGGGGCGACACTCCGCCA
""")
expected_conseqs = {
'1-HCV-1a': 'TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA',
'2-HCV-2b': 'GCCCGCCCCCTGATGGGGGCGACACTCCGCCA'}
conseqs = read_contigs(contigs_csv)
assert expected_conseqs == conseqs
def test_read_contigs_filter():
contigs_csv = StringIO("""\
ref,match,group_ref,contig
HCV-1a,0.24,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-2b,0.25,HCV-2b,GCCCGCCCCCTGATGGGGGCGACACTCCGCCA
""")
expected_conseqs = {
'1-HCV-1a-partial': 'TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA',
'2-HCV-2b': 'GCCCGCCCCCTGATGGGGGCGACACTCCGCCA'}
conseqs = read_contigs(contigs_csv)
assert expected_conseqs == conseqs
def test_read_contigs_reversed(projects):
contigs_csv = StringIO("""\
ref,match,group_ref,contig
HCV-1a,-1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-2b,-0.1,HCV-2b,GCCCGCCCCCTGATGGGGGCGACACTCCGCCA
""")
expected_conseqs = {
'1-HCV-1a-reversed': 'TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA',
'2-HCV-2b-reversed': 'GCCCGCCCCCTGATGGGGGCGACACTCCGCCA'}
conseqs = read_contigs(contigs_csv)
assert expected_conseqs == conseqs
def test_read_contigs_excluded():
contigs_csv = StringIO("""\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HLA-B-seed,0.02,HLA-B-seed,ATGCGGGTCACGGCACCCCGAACCGT
""")
excluded_seeds = ['HLA-B-seed']
expected_conseqs = {
'1-HCV-1a': 'TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA',
'2-HLA-B-seed-excluded': 'ATGCGGGTCACGGCACCCCGAACCGT'}
conseqs = read_contigs(contigs_csv, excluded_seeds)
assert expected_conseqs == conseqs
def test_read_contigs_mutations():
contigs_csv = StringIO("""\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-2b,1.0,HCV-2b,GCCCGACCCATGATGGGGGCGACACTCCGCCA
""")
expected_conseqs = {
'1-HCV-1a': 'TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA',
'2-HCV-2b': 'GCCCGACCCATGATGGGGGCGACACTCCGCCA'}
conseqs = read_contigs(contigs_csv)
assert expected_conseqs == conseqs
def test_read_contigs_merged():
contigs_csv = StringIO("""\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-2b,1.0,HCV-2b,GCCCGCCCCCTGATGGGGGCGACACTCCTCCA
HCV-2a,1.0,HCV-2b,CTCCACCATGAATCACTCCCCTG
""")
# Changes: ^G->A ^G->T
# TODO: Merge contigs 2 and 3, because they overlap.
# 'GCCCGCCCCCTGATGGGGGCGACACTCCTCCATGAATCACTCCCCTG'
# Change: ^
expected_conseqs = {
'1-HCV-1a': 'TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA',
'2-HCV-2b': 'GCCCGCCCCCTGATGGGGGCGACACTCCTCCA',
'3-HCV-2a': 'CTCCACCATGAATCACTCCCCTG'}
conseqs = read_contigs(contigs_csv)
assert expected_conseqs == conseqs
def test_read_contigs_untrimmed_left():
""" Don't trim contigs that extend past reference start. """
contigs_csv = StringIO("""\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-2b,1.0,HCV-2b,TAGACATATTACCGCCCGCCCCCTGATGGGGGCGACACTCCGCCATGAATCACTCCCCTGT
""")
expected_conseqs = {
'1-HCV-1a': 'TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA',
'2-HCV-2b': 'TAGACATATTACCGCCCGCCCCCTGATGGGGGCGACACTCCGCCATGAATCACTCCCCTGT'}
conseqs = read_contigs(contigs_csv)
assert expected_conseqs == conseqs
def test_read_contigs_untrimmed_right():
""" Don't trim contigs that extend past reference end. """
contigs_csv = StringIO("""\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-2b,1.0,HCV-2b,TAGTTTCCGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAGACATATTACC
""")
expected_conseqs = {
'1-HCV-1a': 'TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA',
'2-HCV-2b': 'TAGTTTCCGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAGACATATTACC'}
conseqs = read_contigs(contigs_csv)
assert expected_conseqs == conseqs
def test_full_remap(tmp_path):
""" Test the full process of the remapping step. """
microtest_path = Path(__file__).parent / 'microtest'
fastq1 = str(microtest_path / '1234A-V3LOOP_S1_L001_R1_001.fastq')
fastq2 = str(microtest_path / '1234A-V3LOOP_S1_L001_R2_001.fastq')
prelim_csv = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
M01234:01:000000000-AAAAA:1:1101:01234:0001,99,HIV1-C-BR-JX140663-seed,6535,36,51M,=,6535,-51,\
TGCACAAGACCCAACAACAATACAAGAAAAAGTATAAGGATAGGACCAGGA,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
M01234:01:000000000-AAAAA:1:1101:01234:0001,147,HIV1-C-BR-JX140663-seed,6535,36,51M,=,6535,-51,\
TGCACAAGACCCAACAACAATACAAGAAAAAGTATAAGGATAGGACCAGGA,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
""")
remap_csv = StringIO()
remap_counts_csv = StringIO()
remap_conseq_csv = StringIO()
unmapped1_fastq = StringIO()
unmapped2_fastq = StringIO()
expected_remap_counts = """\
type,count,filtered_count,seed_dist,other_dist,other_seed
raw,20,,,,
prelim HIV1-C-BR-JX140663-seed,2,2,,,
remap-1 HIV1-C-BR-JX140663-seed,20,,,,
remap-final HIV1-C-BR-JX140663-seed,20,,,,
unmapped,0,,,,
"""
remap.remap(fastq1,
fastq2,
prelim_csv,
remap_csv,
remap_counts_csv,
remap_conseq_csv,
unmapped1_fastq,
unmapped2_fastq,
count_threshold=1,
work_path=tmp_path)
assert remap_counts_csv.getvalue() == expected_remap_counts
|
cfe-lab/MiCall
|
micall/tests/test_remap.py
|
Python
|
agpl-3.0
| 54,983
| 0.000527
|
import re
simple_cmd_match = re.compile(r'\\([^\\]+?)\{(.*?)\}')
graphics_cmd_match = re.compile(r'\\includegraphics\[.*?\]?\{(.*?)\}')
begin_cmd_match = re.compile(r'\\begin{([^}]+?)}(?:(?:\[([^\]]+?)\])|.*)')
newcmd_match = re.compile(r'\\.+?\{(.*?)\}\{(.*)\}')
# newcmd_match_with_var = re.compile(r'\\[^\\]+?\{(.*?)\}\{(.*?)\}')
vars_match = re.compile(r'\{(.+?)\}')
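# get_vars collects the contents of top-level {...} groups, tracking brace
# depth so nested braces stay within one item. Illustrative example:
#   get_vars(r'\newcommand{\foo}{a{b}c}') -> ['\\foo', 'a{b}c']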
def get_vars(line):
res = list()
open_braces = 0
one_var = ''
for char in line.strip():
if char == '}':
open_braces -= 1
if open_braces > 0:
one_var += char
elif open_braces == 0 and one_var:
res.append(one_var)
one_var = ''
if char == '{':
open_braces += 1
return res
class FileIter:
def __init__(self, filename):
self.fn = filename
self.f = open(self.fn, 'r')
def get_line(self):
for line in self.f:
yield line
self.f.close()
|
floriangeigl/arxiv_converter
|
tex_utils.py
|
Python
|
gpl-3.0
| 969
| 0.002064
|
# -*- coding: utf-8 -*-
'''
Module for handling openstack neutron calls.
:maintainer: <akilesh1597@gmail.com>
:maturity: new
:platform: all
:optdepends: - neutronclient Python adapter
:configuration: This module is not usable until the following are specified
either in a pillar or in the minion's config file::
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.insecure: False #(optional)
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
If configuration for multiple openstack accounts is required, they can be
set up as different configuration profiles:
For example::
openstack1:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
openstack2:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.2:5000/v2.0/'
With this configuration in place, any of the neutron functions can make
use of a configuration profile by declaring it explicitly.
For example::
salt '*' neutron.list_subnets profile=openstack1
Please check 'https://wiki.openstack.org/wiki/Neutron/APIv2-specification'
for the correct arguments to the api
'''
import logging
from functools import wraps
LOG = logging.getLogger(__name__)
# Import third party libs
HAS_NEUTRON = False
try:
from neutronclient.v2_0 import client
HAS_NEUTRON = True
except ImportError:
pass
__opts__ = {}
def __virtual__():
'''
Only load this module if neutron
is installed on this minion.
'''
if HAS_NEUTRON:
return 'neutron'
return False
def _autheticate(func_name):
'''
Authenticate requests with the salt keystone module and format return data
'''
@wraps(func_name)
def decorator_method(*args, **kwargs):
'''
Authenticate request and format return data
'''
connection_args = {'profile': kwargs.get('profile', None)}
nkwargs = {}
for kwarg in kwargs:
if 'connection_' in kwarg:
connection_args.update({kwarg: kwargs[kwarg]})
elif '__' not in kwarg:
nkwargs.update({kwarg: kwargs[kwarg]})
kstone = __salt__['keystone.auth'](**connection_args)
token = kstone.auth_token
endpoint = kstone.service_catalog.url_for(
service_type='network',
endpoint_type='publicURL')
neutron_interface = client.Client(
endpoint_url=endpoint, token=token)
        LOG.debug('calling with args ' + str(args))
        LOG.debug('calling with kwargs ' + str(nkwargs))
return_data = func_name(neutron_interface, *args, **nkwargs)
        LOG.debug('got return data ' + str(return_data))
if isinstance(return_data, list):
# format list as a dict for rendering
return {data.get('name', None) or data['id']: data
for data in return_data}
return return_data
return decorator_method
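# Minimal sketch of the list-to-dict formatting performed above (the sample
# records are assumed, for illustration only): list results are re-keyed by
# 'name', falling back to 'id' when no name is set.
#   [{'id': 'a1', 'name': 'net1'}, {'id': 'b2', 'name': ''}]
#   -> {'net1': {'id': 'a1', 'name': 'net1'}, 'b2': {'id': 'b2', 'name': ''}}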
@_autheticate
def list_floatingips(neutron_interface, **kwargs):
'''
list all floatingips
CLI Example:
.. code-block:: bash
salt '*' neutron.list_floatingips
'''
return neutron_interface.list_floatingips(**kwargs)['floatingips']
@_autheticate
def list_security_groups(neutron_interface, **kwargs):
'''
list all security_groups
CLI Example:
.. code-block:: bash
salt '*' neutron.list_security_groups
'''
return neutron_interface.list_security_groups(**kwargs)['security_groups']
@_autheticate
def list_subnets(neutron_interface, **kwargs):
'''
list all subnets
CLI Example:
.. code-block:: bash
salt '*' neutron.list_subnets
'''
return neutron_interface.list_subnets(**kwargs)['subnets']
@_autheticate
def list_networks(neutron_interface, **kwargs):
'''
list all networks
CLI Example:
.. code-block:: bash
salt '*' neutron.list_networks
'''
return neutron_interface.list_networks(**kwargs)['networks']
@_autheticate
def list_ports(neutron_interface, **kwargs):
'''
list all ports
CLI Example:
.. code-block:: bash
salt '*' neutron.list_ports
'''
return neutron_interface.list_ports(**kwargs)['ports']
@_autheticate
def list_routers(neutron_interface, **kwargs):
'''
list all routers
CLI Example:
.. code-block:: bash
salt '*' neutron.list_routers
'''
return neutron_interface.list_routers(**kwargs)['routers']
@_autheticate
def update_floatingip(neutron_interface, fip, port_id=None):
'''
update floating IP. Should be used to associate and disassociate
floating IP with instance
CLI Example:
.. code-block:: bash
to associate with an instance's port
salt '*' neutron.update_floatingip openstack-floatingip-id port-id
to disassociate from an instance's port
salt '*' neutron.update_floatingip openstack-floatingip-id
'''
neutron_interface.update_floatingip(fip, {"floatingip":
{"port_id": port_id}})
@_autheticate
def update_subnet(neutron_interface, subnet_id, **subnet_params):
'''
update given subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.update_subnet openstack-subnet-id name='new_name'
'''
neutron_interface.update_subnet(subnet_id, {'subnet': subnet_params})
@_autheticate
def update_router(neutron_interface, router_id, **router_params):
'''
update given router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router openstack-router-id name='new_name'
external_gateway='openstack-network-id' administrative_state=true
'''
neutron_interface.update_router(router_id, {'router': router_params})
@_autheticate
def router_gateway_set(neutron_interface, router_id, external_gateway):
'''
Set external gateway for a router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router openstack-router-id openstack-network-id
'''
neutron_interface.update_router(
router_id, {'router': {'external_gateway_info':
{'network_id': external_gateway}}})
@_autheticate
def router_gateway_clear(neutron_interface, router_id):
'''
Clear external gateway for a router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router openstack-router-id
'''
neutron_interface.update_router(
router_id, {'router': {'external_gateway_info': None}})
@_autheticate
def create_router(neutron_interface, **router_params):
'''
Create OpenStack Neutron router
CLI Example:
.. code-block:: bash
salt '*' neutron.create_router name=R1
'''
response = neutron_interface.create_router({'router': router_params})
if 'router' in response and 'id' in response['router']:
return response['router']['id']
@_autheticate
def router_add_interface(neutron_interface, router_id, subnet_id):
'''
Attach router to a subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.router_add_interface openstack-router-id subnet-id
'''
neutron_interface.add_interface_router(router_id, {'subnet_id': subnet_id})
@_autheticate
def router_rem_interface(neutron_interface, router_id, subnet_id):
'''
    Detach router from a subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.router_rem_interface openstack-router-id subnet-id
'''
neutron_interface.remove_interface_router(
router_id, {'subnet_id': subnet_id})
@_autheticate
def create_security_group(neutron_interface, **sg_params):
'''
Create a new security group
CLI Example:
.. code-block:: bash
salt '*' neutron.create_security_group name='new_rule'
description='test rule'
'''
response = neutron_interface.create_security_group(
{'security_group': sg_params})
if 'security_group' in response and 'id' in response['security_group']:
return response['security_group']['id']
@_autheticate
def create_security_group_rule(neutron_interface, **rule_params):
'''
Create a rule entry for a security group
CLI Example:
.. code-block:: bash
salt '*' neutron.create_security_group_rule
'''
neutron_interface.create_security_group_rule(
{'security_group_rule': rule_params})
@_autheticate
def create_floatingip(neutron_interface, **floatingip_params):
'''
Create a new floating IP
CLI Example:
.. code-block:: bash
salt '*' neutron.create_floatingip floating_network_id=ext-net-id
'''
response = neutron_interface.create_floatingip(
{'floatingip': floatingip_params})
if 'floatingip' in response and 'id' in response['floatingip']:
return response['floatingip']['id']
@_autheticate
def create_subnet(neutron_interface, **subnet_params):
'''
Create a new subnet in OpenStack
CLI Example:
.. code-block:: bash
salt '*' neutron.create_subnet name='subnet name'
network_id='openstack-network-id' cidr='192.168.10.0/24' \\
gateway_ip='192.168.10.1' ip_version='4' enable_dhcp=false \\
start_ip='192.168.10.10' end_ip='192.168.10.20'
'''
if 'start_ip' in subnet_params:
subnet_params.update(
{'allocation_pools': [{'start': subnet_params.pop('start_ip'),
'end': subnet_params.pop('end_ip', None)}]})
response = neutron_interface.create_subnet({'subnet': subnet_params})
if 'subnet' in response and 'id' in response['subnet']:
return response['subnet']['id']
@_autheticate
def create_network(neutron_interface, **network_params):
'''
Create a new network segment in OpenStack
CLI Example:
.. code-block:: bash
salt '*' neutron.create_network name=External
provider_network_type=flat provider_physical_network=ext
'''
    # keyword-style argument names are translated into the API's namespaced
    # keys, e.g. provider_network_type -> provider:network_type
    network_params = {param.replace('_', ':', 1):
                      network_params[param] for param in network_params}
response = neutron_interface.create_network({'network': network_params})
if 'network' in response and 'id' in response['network']:
return response['network']['id']
@_autheticate
def create_port(neutron_interface, **port_params):
'''
Create a new port in OpenStack
CLI Example:
.. code-block:: bash
salt '*' neutron.create_port network_id='openstack-network-id'
'''
response = neutron_interface.create_port({'port': port_params})
if 'port' in response and 'id' in response['port']:
return response['port']['id']
@_autheticate
def update_port(neutron_interface, port_id, **port_params):
'''
    Update an existing port in OpenStack
CLI Example:
.. code-block:: bash
salt '*' neutron.update_port name='new_port_name'
'''
neutron_interface.update_port(port_id, {'port': port_params})
@_autheticate
def delete_floatingip(neutron_interface, floating_ip_id):
'''
delete a floating IP
CLI Example:
.. code-block:: bash
salt '*' neutron.delete_floatingip openstack-floating-ip-id
'''
neutron_interface.delete_floatingip(floating_ip_id)
@_autheticate
def delete_security_group(neutron_interface, sg_id):
'''
delete a security group
CLI Example:
.. code-block:: bash
salt '*' neutron.delete_security_group openstack-security-group-id
'''
neutron_interface.delete_security_group(sg_id)
@_autheticate
def delete_security_group_rule(neutron_interface, rule):
'''
delete a security group rule. pass all rule params that match the rule
to be deleted
CLI Example:
.. code-block:: bash
salt '*' neutron.delete_security_group_rule direction='ingress'
ethertype='ipv4' security_group_id='openstack-security-group-id'
port_range_min=100 port_range_max=4096 protocol='tcp'
remote_group_id='default'
'''
sg_rules = neutron_interface.list_security_group_rules(
security_group_id=rule['security_group_id'])
for sg_rule in sg_rules['security_group_rules']:
sgr_id = sg_rule.pop('id')
if sg_rule == rule:
neutron_interface.delete_security_group_rule(sgr_id)
@_autheticate
def delete_subnet(neutron_interface, subnet_id):
'''
delete given subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.delete_subnet openstack-subnet-id
'''
neutron_interface.delete_subnet(subnet_id)
@_autheticate
def delete_network(neutron_interface, network_id):
'''
delete given network
CLI Example:
.. code-block:: bash
salt '*' neutron.delete_network openstack-network-id
'''
neutron_interface.delete_network(network_id)
@_autheticate
def delete_router(neutron_interface, router_id):
'''
delete given router
CLI Example:
.. code-block:: bash
salt '*' neutron.delete_router openstack-router-id
'''
neutron_interface.delete_router(router_id)
|
CSSCorp/openstack-automation
|
file_root/_modules/neutron.py
|
Python
|
gpl-2.0
| 13,539
| 0
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from .views import HomeView
# Uncomment the next two lines to enable the admin:
admin.autodiscover()
urlpatterns = (
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
patterns(
'',
url('^$', HomeView.as_view(), name='home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^social/', include('socialregistration.urls', namespace='socialregistration')),
)
)
|
jairtrejo/doko
|
app/rohan/urls.py
|
Python
|
mit
| 581
| 0.001721
|
# -*- coding: utf-8 -*-
#
# django-cachalot documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 28 22:46:50 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
import cachalot
# This sets up Django, necessary for autodoc
import runtests
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-cachalot'
copyright = '2014-2016, Bertrand Bordage'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '%s.%s' % cachalot.VERSION[:2]
# The full version, including alpha/beta/rc tags.
release = cachalot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-cachalotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-cachalot.tex', u'django-cachalot Documentation',
u'Bertrand Bordage', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-cachalot', u'django-cachalot Documentation',
[u'Bertrand Bordage'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-cachalot', u'django-cachalot Documentation',
u'Bertrand Bordage', 'django-cachalot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
BertrandBordage/django-cachalot
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,766
| 0.006046
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
product = Table('product', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('product_name', VARCHAR),
Column('bar_code', INTEGER),
Column('price', NUMERIC),
Column('picture_id', INTEGER),
Column('category', VARCHAR),
Column('inprice', NUMERIC),
Column('size', VARCHAR),
Column('supply', INTEGER),
)
product = Table('product', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('product_name', String),
Column('category', String),
Column('bar_code', Integer),
Column('size', String),
Column('inprice', Numeric),
Column('price', Numeric),
Column('supply_id', Integer),
Column('picture_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['product'].columns['supply'].drop()
post_meta.tables['product'].columns['supply_id'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['product'].columns['supply'].create()
post_meta.tables['product'].columns['supply_id'].drop()
|
dogsaur/SMS
|
db_repository/versions/007_migration.py
|
Python
|
mit
| 1,457
| 0.001373
|
import pytest
import six
from mock import call, patch
from tests import utils
from week_parser.base import parse_row, parse_week, populate_extra_data
from week_parser.main import PrettyPrinter
def test_populate_extra_data_no_days():
"""
If we haven't found any days data, there is not extra data to add
"""
week_data = {}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {}
def test_populate_extra_data_square_day():
"""
If we have found a 'square' day, the description and square value is added
"""
value = 7
week_data = {'mon': {'value': value}}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {
'mon': {
'value': value,
'square': value ** 2,
'description': '{} {}'.format(description, value ** 2)
}
}
def test_populate_extra_data_double_day():
"""
If we have found a 'double' day, the description and double value is added
"""
value = 7
week_data = {'thu': {'value': value}}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {
'thu': {
'value': value,
'double': value * 2,
'description': '{} {}'.format(description, value * 2)
}
}
def test_parse_row_single_day():
"""
If the input row contains a single day, it is outputted
"""
row = {'mon': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {'mon': {'day': 'mon', 'value': 3}}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_day_range():
"""
If the input row contains a day range, it is outputted
"""
row = {'mon-wed': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {
'mon': {'day': 'mon', 'value': 3},
'tue': {'day': 'tue', 'value': 3},
'wed': {'day': 'wed', 'value': 3},
}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_extra_columns():
"""
If the input row contains any extra columns, they are skipped
"""
row = {'wed': '2', 'description': '__DESCRIPTION__',
'__FOO__': '__BAR__', '__ANYTHING__': '__ELSE__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {'wed': {'day': 'wed', 'value': 2}}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_not_int_value():
"""
If the day value is not an integer, we get a ValueError
"""
row = {'mon': '__NOT_A_NUMBER__', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
with pytest.raises(ValueError) as exc:
parse_row(row)
assert mock_populate.call_count == 0
assert str(exc.value) == (
"invalid literal for int() with base 10: '__NOT_A_NUMBER__'")
def test_parse_row_invalid_day_range():
"""
If the input row contains an invalid day range, we skip it
"""
row = {'foo-bar': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row():
"""
An input row may contain any combination of day ranges
"""
row = {'mon-tue': '3', 'wed-thu': '2', 'fri': '1',
'__SOME__': '__DATA__', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {
'mon': {'day': 'mon', 'value': 3},
'tue': {'day': 'tue', 'value': 3},
'wed': {'day': 'wed', 'value': 2},
'thu': {'day': 'thu', 'value': 2},
'fri': {'day': 'fri', 'value': 1},
}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_week_empty_file():
"""
We can process an empty file
"""
filename = 'anything.csv'
with utils.mock_open(file_content='') as mock_open:
with patch('week_parser.base.parse_row') as mock_parse_week:
result = parse_week(filename)
assert result == []
assert mock_open.call_args_list == [call(filename)]
assert mock_parse_week.call_count == 0
def test_parse_week_valid_file():
"""
We can process a file with valid content
"""
filename = 'anything.csv'
csv_data = ('mon,tue,some_column1,wed,thu,fri,description\n'
'1,5,data,2,3,3,first_desc\n')
expected_row = {'mon': '1', 'tue': '5', 'wed': '2', 'thu': '3', 'fri': '3',
'description': 'first_desc', 'some_column1': 'data'}
with utils.mock_open(file_content=csv_data) as mock_open:
with patch('week_parser.base.parse_row') as mock_parse_row:
mock_parse_row.return_value = {'mon': {'day': 'mon'}}
result = parse_week(filename)
assert result == [{'day': 'mon'}]
assert mock_open.call_args_list == [call(filename)]
assert mock_parse_row.call_args_list == [call(expected_row)]
def test_pprint_bytes(capsys):
printer = PrettyPrinter()
printer.pprint(six.b('__FOO__'))
out, err = capsys.readouterr()
assert err == ''
assert out == "'__FOO__'\n"
def test_pprint_unicode(capsys):
printer = PrettyPrinter()
printer.pprint(six.u('__FOO__'))
out, err = capsys.readouterr()
assert err == ''
assert out == "'__FOO__'\n"
|
JoseKilo/week_parser
|
tests/unit/test_week_parser.py
|
Python
|
mit
| 5,936
| 0
|
from contextlib import nullcontext
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import os_fspath, is_pathlib_path
from numpy.core.overrides import set_module
__all__ = ['memmap']
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
mode_equivalents = {
"readonly":"r",
"copyonwrite":"c",
"readwrite":"r+",
"write":"w+"
}
@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. NumPy's
memmap's are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Flush the memmap instance to write the changes to the file. Currently there
is no API to close the underlying ``mmap``. It is tricky to ensure the
resource is actually closed, since it may be shared between different
memmap instances.
Parameters
----------
filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode : {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r' | Open existing file for reading only. |
+------+-------------------------------------------------------------+
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
offset : int, optional
In the file, array data starts at this offset. Since `offset` is
measured in bytes, it should normally be a multiple of the byte-size
of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
        file are valid; the file will be extended to accommodate the
additional data. By default, ``memmap`` will start at the beginning of
the file, even if ``filename`` is a file pointer ``fp`` and
``fp.tell() != 0``.
shape : tuple, optional
The desired shape of the array. If ``mode == 'r'`` and the number
of remaining bytes after `offset` is not a multiple of the byte-size
of `dtype`, you must specify `shape`. By default, the returned array
will be 1-D with the number of elements determined by file size
and data-type.
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout:
:term:`row-major`, C-style or :term:`column-major`,
Fortran-style. This only has an effect if the shape is
greater than 1-D. The default order is 'C'.
Attributes
----------
filename : str or pathlib.Path instance
Path to the mapped file.
offset : int
Offset position in the file.
mode : str
File mode.
Methods
-------
flush
Flush any changes in memory to file on disk.
When you delete a memmap object, flush is called first to write
changes to disk.
See also
--------
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
The memmap object can be used anywhere an ndarray is accepted.
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
``True``.
Memory-mapped files cannot be larger than 2GB on 32-bit systems.
When a memmap causes a file to be created or extended beyond its
current size in the filesystem, the contents of the new part are
unspecified. On systems with POSIX filesystem semantics, the extended
part will be filled with zero bytes.
Examples
--------
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
This example uses a temporary file so that doctest doesn't write
files to your directory. You would use a 'normal' filename.
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
Create a memmap with dtype and shape that matches our data:
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
>>> fp[:] = data[:]
>>> fp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fp.filename == path.abspath(filename)
True
Flushes memory changes to disk in order to read them back
>>> fp.flush()
Load the memmap and verify data was stored:
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> newfp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Read-only memmap:
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> fpr.flags.writeable
False
Copy-on-write memmap:
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
>>> fpc.flags.writeable
True
It's possible to assign to copy-on-write array, but values are only
written into the memory copy of the array, and not written to disk:
>>> fpc
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fpc[0,:] = 0
>>> fpc
memmap([[ 0., 0., 0., 0.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
File on disk is unchanged:
>>> fpr
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Offset into a memmap:
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
>>> fpo
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
"""
__array_priority__ = -100.0
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError as e:
if mode not in valid_filemodes:
raise ValueError(
"mode must be one of {!r} (got {!r})"
.format(valid_filemodes + list(mode_equivalents.keys()), mode)
) from None
if mode == 'w+' and shape is None:
raise ValueError("shape must be given")
if hasattr(filename, 'read'):
f_ctx = nullcontext(filename)
else:
f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if bytes % _dbytes:
raise ValueError("Size of available data is not a "
"multiple of the data-type size.")
size = bytes // _dbytes
shape = (size,)
else:
if not isinstance(shape, tuple):
shape = (shape,)
size = np.intp(1) # avoid default choice of np.int_, which might overflow
for k in shape:
size *= k
bytes = int(offset + size*_dbytes)
if mode in ('w+', 'r+') and flen < bytes:
fid.seek(bytes - 1, 0)
fid.write(b'\0')
fid.flush()
if mode == 'c':
acc = mmap.ACCESS_COPY
elif mode == 'r':
acc = mmap.ACCESS_READ
else:
acc = mmap.ACCESS_WRITE
            # mmap requires the mapping to start at a multiple of the
            # allocation granularity, so map from the nearest aligned
            # position and keep the remainder as the array offset.
            start = offset - offset % mmap.ALLOCATIONGRANULARITY
            bytes -= start
            array_offset = offset - start
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
offset=array_offset, order=order)
self._mmap = mm
self.offset = offset
self.mode = mode
if is_pathlib_path(filename):
# special case - if we were constructed with a pathlib.path,
# then filename is a path object, not a string
self.filename = filename.resolve()
elif hasattr(fid, "name") and isinstance(fid.name, str):
# py3 returns int for TemporaryFile().name
self.filename = os.path.abspath(fid.name)
# same as memmap copies (e.g. memmap + 1)
else:
self.filename = None
return self
def __array_finalize__(self, obj):
if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
self._mmap = obj._mmap
self.filename = obj.filename
self.offset = obj.offset
self.mode = obj.mode
else:
self._mmap = None
self.filename = None
self.offset = None
self.mode = None
def flush(self):
"""
Write any changes in the array to the file on disk.
For further information, see `memmap`.
Parameters
----------
None
See Also
--------
memmap
"""
if self.base is not None and hasattr(self.base, 'flush'):
self.base.flush()
def __array_wrap__(self, arr, context=None):
arr = super().__array_wrap__(arr, context)
# Return a memmap if a memmap was given as the output of the
# ufunc. Leave the arr class unchanged if self is not a memmap
# to keep original memmap subclasses behavior
if self is arr or type(self) is not memmap:
return arr
# Return scalar instead of 0d memmap, e.g. for np.sum with
# axis=None
if arr.shape == ():
return arr[()]
# Return ndarray otherwise
return arr.view(np.ndarray)
def __getitem__(self, index):
res = super().__getitem__(index)
if type(res) is memmap and res._mmap is None:
return res.view(type=ndarray)
return res
|
anntzer/numpy
|
numpy/core/memmap.py
|
Python
|
bsd-3-clause
| 11,688
| 0.000684
|
# -*- coding: utf-8 -*-
#
# libxmlquery documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 5 15:13:45 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libxmlquery'
copyright = u'2010, Frederico Gonçalves, Vasco Fernandes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libxmlquerydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libxmlquery.tex', u'libxmlquery Documentation',
u'Frederico Gonçalves, Vasco Fernandes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libxmlquery', u'libxmlquery Documentation',
[u'Frederico Gonçalves, Vasco Fernandes'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'libxmlquery'
epub_author = u'Frederico Gonçalves, Vasco Fernandes'
epub_publisher = u'Frederico Gonçalves, Vasco Fernandes'
epub_copyright = u'2010, Frederico Gonçalves, Vasco Fernandes'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
nullable/libxmlquery
|
documentation/conf.py
|
Python
|
mit
| 8,376
| 0.00693
|
def main(request, response):
headers = {
# CORS-safelisted
"content-type": "text/plain",
"cache-control": "no cache",
"content-language": "en",
"expires": "Fri, 30 Oct 1998 14:19:41 GMT",
"last-modified": "Tue, 15 Nov 1994 12:45:26 GMT",
"pragma": "no-cache",
# Non-CORS-safelisted
"x-test": "foobar",
"Access-Control-Allow-Origin": "*"
}
for header in headers:
response.headers.set(header, headers[header])
response.content = "PASS: Cross-domain access allowed."
|
paulrouget/servo
|
tests/wpt/web-platform-tests/xhr/resources/access-control-basic-whitelist-response-headers.py
|
Python
|
mpl-2.0
| 571
| 0
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of cpu_temperature.py
"""
# pylint: disable=unused-argument
import logging
import unittest
from devil import devil_env
from devil.android import cpu_temperature
from devil.android import device_utils
from devil.utils import mock_calls
from devil.android.sdk import adb_wrapper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
class CpuTemperatureTest(mock_calls.TestCase):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def setUp(self):
# Mock the device
self.mock_device = mock.Mock(spec=device_utils.DeviceUtils)
self.mock_device.build_product = 'blueline'
self.mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
self.mock_device.FileExists.return_value = True
self.cpu_temp = cpu_temperature.CpuTemperature(self.mock_device)
self.cpu_temp.InitThermalDeviceInformation()
class CpuTemperatureInitTest(unittest.TestCase):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testInitWithDeviceUtil(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
c = cpu_temperature.CpuTemperature(d)
self.assertEqual(d, c.GetDeviceForTesting())
def testInitWithMissing_fails(self):
with self.assertRaises(TypeError):
cpu_temperature.CpuTemperature(None)
with self.assertRaises(TypeError):
cpu_temperature.CpuTemperature('')
class CpuTemperatureGetThermalDeviceInformationTest(CpuTemperatureTest):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testGetThermalDeviceInformation_noneWhenIncorrectLabel(self):
invalid_device = mock.Mock(spec=device_utils.DeviceUtils)
invalid_device.build_product = 'invalid_name'
c = cpu_temperature.CpuTemperature(invalid_device)
c.InitThermalDeviceInformation()
self.assertEqual(c.GetDeviceInfoForTesting(), None)
def testGetThermalDeviceInformation_getsCorrectInformation(self):
correct_information = {
'cpu0': '/sys/class/thermal/thermal_zone11/temp',
'cpu1': '/sys/class/thermal/thermal_zone12/temp',
'cpu2': '/sys/class/thermal/thermal_zone13/temp',
'cpu3': '/sys/class/thermal/thermal_zone14/temp',
'cpu4': '/sys/class/thermal/thermal_zone15/temp',
'cpu5': '/sys/class/thermal/thermal_zone16/temp',
'cpu6': '/sys/class/thermal/thermal_zone17/temp',
'cpu7': '/sys/class/thermal/thermal_zone18/temp'
}
    self.assertEqual(
        correct_information,
        self.cpu_temp.GetDeviceInfoForTesting().get('cpu_temps'))
class CpuTemperatureIsSupportedTest(CpuTemperatureTest):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testIsSupported_returnsTrue(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
d.FileExists.return_value = True
c = cpu_temperature.CpuTemperature(d)
self.assertTrue(c.IsSupported())
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testIsSupported_returnsFalse(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
d.FileExists.return_value = False
c = cpu_temperature.CpuTemperature(d)
self.assertFalse(c.IsSupported())
class CpuTemperatureLetCpuCoolToTemperatureTest(CpuTemperatureTest):
# Return values for the mock side effect
  cooling_down0 = (
      [45000 for _ in range(8)] + [43000 for _ in range(8)] +
      [41000 for _ in range(8)])
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_coolWithin24Calls(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.cooling_down0)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEquals(self.mock_device.ReadFile.call_count, 24)
cooling_down1 = [45000 for _ in range(8)] + [41000 for _ in range(16)]
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_coolWithin16Calls(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.cooling_down1)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEquals(self.mock_device.ReadFile.call_count, 16)
constant_temp = [45000 for _ in range(40)]
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_timeoutAfterThree(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.constant_temp)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEquals(self.mock_device.ReadFile.call_count, 24)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
endlessm/chromium-browser
|
third_party/catapult/devil/devil/android/cpu_temperature_test.py
|
Python
|
bsd-3-clause
| 4,988
| 0.005413
|
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R
class ScSp(G2R.SpSyntax):
def Show(self,Flag,Attrs,US,UT,Tmp,FS):
sw=''
name,Attrs=self.Check(Flag,Attrs,UT,FS)
if Attrs['k']=='Main':
sw+=' $ store.chapter='
sw+="'Chapter."+Attrs['cp']+Attrs['sc']+"'\n"
return sw
|
dtysky/Gal2Renpy
|
Gal2Renpy/SpSyntax/ScSp.py
|
Python
|
mit
| 352
| 0.073864
|
##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declaration of toolchains.compiler namespace.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import pkg_resources
pkg_resources.declare_namespace(__name__)
|
ULHPC/easybuild-framework
|
easybuild/toolchains/compiler/__init__.py
|
Python
|
gpl-2.0
| 1,248
| 0.001603
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base spout for integration tests"""
import copy
from heron.common.src.python.utils.log import Log
from heron.api.src.python.spout.spout import Spout
from heron.api.src.python.stream import Stream
from heron.api.src.python.component.component_spec import HeronComponentSpec
import heron.common.src.python.pex_loader as pex_loader
from ..core import constants as integ_const
class IntegrationTestSpout(Spout):
"""Base spout for integration test
  Every spout in an integration test topology is an instance of this class, each delegating to the user's spout.
"""
outputs = [Stream(fields=[integ_const.INTEGRATION_TEST_TERMINAL],
name=integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)]
@classmethod
def spec(cls, name, par, config, user_spout_classpath, user_output_fields=None):
python_class_path = "%s.%s" % (cls.__module__, cls.__name__)
config[integ_const.USER_SPOUT_CLASSPATH] = user_spout_classpath
# avoid modification to cls.outputs
_outputs = copy.copy(cls.outputs)
if user_output_fields is not None:
_outputs.extend(user_output_fields)
return HeronComponentSpec(name, python_class_path, is_spout=True, par=par,
inputs=None, outputs=_outputs, config=config)
def initialize(self, config, context):
user_spout_classpath = config.get(integ_const.USER_SPOUT_CLASSPATH, None)
if user_spout_classpath is None:
raise RuntimeError("User defined integration test spout was not found")
user_spout_cls = self._load_user_spout(context.get_topology_pex_path(), user_spout_classpath)
self.user_spout = user_spout_cls(delegate=self)
self.max_executions = config.get(integ_const.USER_MAX_EXECUTIONS, integ_const.MAX_EXECUTIONS)
assert isinstance(self.max_executions, int) and self.max_executions > 0
Log.info("Max executions: %d" % self.max_executions)
self.tuples_to_complete = 0
self.user_spout.initialize(config, context)
@staticmethod
def _load_user_spout(pex_file, classpath):
pex_loader.load_pex(pex_file)
cls = pex_loader.import_and_get_class(pex_file, classpath)
return cls
@property
def is_done(self):
return self.max_executions == 0
def next_tuple(self):
if self.is_done:
return
self.max_executions -= 1
Log.info("max executions: %d" % self.max_executions)
self.user_spout.next_tuple()
if self.is_done:
self._emit_terminal_if_needed()
Log.info("This topology is finished.")
def ack(self, tup_id):
Log.info("Received an ack with tuple id: %s" % str(tup_id))
self.tuples_to_complete -= 1
if tup_id != integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID:
self.user_spout.ack(tup_id)
self._emit_terminal_if_needed()
def fail(self, tup_id):
Log.info("Received a fail message with tuple id: %s" % str(tup_id))
self.tuples_to_complete -= 1
if tup_id != integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID:
self.user_spout.fail(tup_id)
self._emit_terminal_if_needed()
def emit(self, tup, tup_id=None, stream=Stream.DEFAULT_STREAM_ID,
direct_task=None, need_task_ids=None):
"""Emits from this integration test spout
    Overridden method which will be called when the user's spout calls emit()
"""
# if is_control True -> control stream should not count
self.tuples_to_complete += 1
if tup_id is None:
Log.info("Add tup_id for tuple: %s" % str(tup))
_tup_id = integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID
else:
_tup_id = tup_id
super(IntegrationTestSpout, self).emit(tup, _tup_id, stream, direct_task, need_task_ids)
def _emit_terminal_if_needed(self):
Log.info("is_done: %s, tuples_to_complete: %s" % (self.is_done, self.tuples_to_complete))
if self.is_done and self.tuples_to_complete == 0:
Log.info("Emitting terminals to downstream")
super(IntegrationTestSpout, self).emit([integ_const.INTEGRATION_TEST_TERMINAL],
stream=integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)
|
srkukarni/heron
|
integration_test/src/python/integration_test/core/integration_test_spout.py
|
Python
|
apache-2.0
| 4,617
| 0.007581
|
from django.db import models
from cms.models import CMSPlugin
CLASS_CHOICES = ['container', 'content', 'teaser']
CLASS_CHOICES = tuple((entry, entry) for entry in CLASS_CHOICES)
TAG_CHOICES = [
'div', 'article', 'section', 'header', 'footer', 'aside',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6'
]
TAG_CHOICES = tuple((entry, entry) for entry in TAG_CHOICES)
class Style(CMSPlugin):
"""
Renders a given ``TAG_CHOICES`` element with additional attributes
"""
label = models.CharField(
verbose_name='Label',
blank=True,
max_length=255,
help_text='Overrides the display name in the structure mode.',
)
tag_type = models.CharField(
verbose_name='Tag type',
choices=TAG_CHOICES,
default=TAG_CHOICES[0][0],
max_length=255,
)
class_name = models.CharField(
verbose_name='Class name',
choices=CLASS_CHOICES,
default=CLASS_CHOICES[0][0],
blank=True,
max_length=255,
)
additional_classes = models.CharField(
verbose_name='Additional classes',
blank=True,
max_length=255,
)
def __str__(self):
return self.label or self.tag_type or str(self.pk)
def get_short_description(self):
# display format:
# Style label <tag> .list.of.classes #id
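        # For a hypothetical instance with label "Teaser box", tag "section" and
        # classes "teaser" + "wide", this would yield: Teaser box <section> .teaser.wide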
display = []
classes = []
if self.label:
display.append(self.label)
if self.tag_type:
display.append('<{0}>'.format(self.tag_type))
if self.class_name:
classes.append(self.class_name)
if self.additional_classes:
classes.extend(item.strip() for item in self.additional_classes.split(',') if item.strip())
display.append('.{0}'.format('.'.join(classes)))
return ' '.join(display)
def get_additional_classes(self):
return ' '.join(item.strip() for item in self.additional_classes.split(',') if item.strip())
|
rsalmaso/django-cms
|
cms/test_utils/project/pluginapp/plugins/style/models.py
|
Python
|
bsd-3-clause
| 1,967
| 0.001525
|
"""
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"""
    The 'geometry_columns' table from PostGIS. See the PostGIS
documentation at Ch. 4.2.2.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
        Returns the name of the metadata column used to store
        the feature table name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
        Returns the name of the metadata column used to store
        the feature geometry column.
"""
return 'f_geometry_column'
def __unicode__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
|
t11e/django
|
django/contrib/gis/db/backends/postgis/models.py
|
Python
|
bsd-3-clause
| 2,022
| 0.000989
|
__source__ = 'https://leetcode.com/problems/valid-anagram/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/valid-anagram.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 242. Valid Anagram
#
# Given two strings s and t, write a function to
# determine if t is an anagram of s.
#
# For example,
# s = "anagram", t = "nagaram", return true.
# s = "rat", t = "car", return false.
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Companies
# Amazon Uber Yelp
# Related Topics
# Hash Table Sort
# Similar Questions
# Group Anagrams Palindrome Permutation Find All Anagrams in a String
#
import unittest
class Solution:
# @param {string} s
# @param {string} t
# @return {boolean}
def isAnagram(self, s, t):
if len(s) != len(t):
return False
count = {}
for c in s:
if c.lower() in count:
count[c.lower()] += 1
else:
count[c.lower()] = 1
for c in t:
if c.lower() in count:
count[c.lower()] -= 1
else:
count[c.lower()] = -1
if count[c.lower()] < 0:
return False
return True
# Time: O(nlogn)
# Space: O(n)
class Solution2:
# @param {string} s
# @param {string} t
# @return {boolean}
def isAnagram(self, s, t):
return sorted(s) == sorted(t)
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
        print(Solution().isAnagram('a', 'a'))
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/valid-anagram/solution/
#
# 4ms 71.69%
class Solution {
public boolean isAnagram(String s, String t) {
if (s.length() != t.length()) {
return false;
}
int[] count = new int[26];
for (int i = 0; i < s.length(); i++) {
count[s.charAt(i) - 'a']++;
}
for (int i = 0; i < t.length(); i++) {
count[t.charAt(i) - 'a']--;
}
for (int i = 0; i < 26; i++) {
if (count[i] != 0) {
return false;
}
}
return true;
}
}
Approach #1 (Sorting) [Accepted]
# Time: O(nlogn)
# Space: O(1)
#7ms 41.66%
class Solution {
public boolean isAnagram(String s, String t) {
if (s.length() != t.length()) {
return false;
}
char[] str1 = s.toCharArray();
char[] str2 = t.toCharArray();
Arrays.sort(str1);
Arrays.sort(str2);
return Arrays.equals(str1, str2);
}
}
# 3ms 81.95%
class Solution {
public boolean isAnagram(String s, String t) {
int [] alp = new int[26];
for(int i = 0;i<s.length();i++) alp[s.charAt(i) - 'a']++;
for(int i = 0;i<t.length();i++) alp[t.charAt(i) - 'a']--;
for(int i : alp) if(i!=0) return false;
return true;
}
}
# 6ms 49.29%
class Solution {
public boolean isAnagram(String s, String t) {
return Arrays.equals(countCharacters(s), countCharacters(t));
}
private int[] countCharacters(String s) {
int[] count = new int[26];
for (int i = 0; i < s.length(); i++) {
count[s.charAt(i) - 'a']++;
}
return count;
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/ValidAnagram.py
|
Python
|
apache-2.0
| 3,340
| 0.001796
|
from typing import Dict
from unittest import mock
from conductor.accounts.forms import DeactivateForm, SignupForm
from conductor.tests import TestCase
class TestSignupForm(TestCase):
def test_valid(self) -> None:
product_plan = self.ProductPlanFactory.create()
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
self.assertEqual(product_plan, form.product_plan)
def test_required(self) -> None:
product_plan = self.ProductPlanFactory.create()
data: Dict[str, str] = {}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("username", form.errors)
self.assertIn("email", form.errors)
self.assertIn("password", form.errors)
self.assertIn("stripe_token", form.errors)
self.assertNotIn("postal_code", form.errors)
def test_invalid_password(self) -> None:
product_plan = self.ProductPlanFactory.create()
        # Test a similar username and password to ensure a user instance
        # is present and factored into the validation.
data = {
"username": "mattlayman",
"email": "matt@test.com",
"password": "mattlayman",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("password", form.errors)
def test_unique_email(self) -> None:
product_plan = self.ProductPlanFactory.create()
self.UserFactory.create(email="matt@test.com")
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("email", form.errors)
def test_unique_username(self) -> None:
product_plan = self.ProductPlanFactory.create()
self.UserFactory.create(username="matt")
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("username", form.errors)
@mock.patch("conductor.accounts.forms.stripe_gateway")
def test_creates_user(self, stripe_gateway: mock.MagicMock) -> None:
product_plan = self.ProductPlanFactory.create()
stripe_gateway.create_customer.return_value = "cus_1234"
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "21702",
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertEqual(user.username, "matt")
self.assertEqual(user.email, "matt@test.com")
self.assertEqual(user.profile.postal_code, "21702")
self.assertEqual(user.profile.stripe_customer_id, "cus_1234")
@mock.patch("conductor.accounts.forms.stripe_gateway")
def test_missing_postal_code(self, stripe_gateway: mock.MagicMock) -> None:
product_plan = self.ProductPlanFactory.create()
stripe_gateway.create_customer.return_value = "cus_1234"
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": None,
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertEqual(user.profile.postal_code, "")
class TestDeactivateForm(TestCase):
def test_matching_email(self) -> None:
user = self.UserFactory.create()
data = {"email": user.email}
form = DeactivateForm(user, data=data)
is_valid = form.is_valid()
self.assertTrue(is_valid)
def test_mismatched_email(self) -> None:
user = self.UserFactory.create()
data = {"email": f"nomatch-{user.email}"}
form = DeactivateForm(user, data=data)
is_valid = form.is_valid()
self.assertFalse(is_valid)
self.assertIn("email", form.errors)
@mock.patch("conductor.accounts.forms.stripe_gateway")
def test_save(self, stripe_gateway: mock.MagicMock) -> None:
"""The user subscription gets cancelled and the user is marked inactive."""
user = self.UserFactory.create()
form = DeactivateForm(user)
form.save()
stripe_gateway.cancel_subscription.assert_called_once_with(user)
user.refresh_from_db()
self.assertFalse(user.is_active)
|
mblayman/lcp
|
conductor/accounts/tests/test_forms.py
|
Python
|
bsd-2-clause
| 5,205
| 0.000192
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'pasportaservo',
'USER': 'guillaume',
}
}
LANGUAGE_CODE = 'en'
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'django_countries',
'phonenumber_field',
'bootstrapform',
'leaflet',
'postman',
'hosting',
'pages',
'debug_toolbar',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
LaPingvino/pasportaservo
|
pasportaservo/settings/dev_etenil.py
|
Python
|
agpl-3.0
| 652
| 0
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
import newfies
from django.conf import settings
def newfies_version(request):
return {'newfies_version': newfies.__version__, 'SURVEYDEV': settings.SURVEYDEV}
|
garyjs/Newfiesautodialer
|
newfies/context_processors.py
|
Python
|
mpl-2.0
| 564
| 0.001773
|
from shutit_module import ShutItModule
import base64
class openshift_airflow(ShutItModule):
def build(self, shutit):
shutit.send('cd /tmp/openshift_vm')
shutit.login(command='vagrant ssh')
shutit.login(command='sudo su -',password='vagrant',note='Become root (there is a problem logging in as admin with the vagrant user')
# AIRFLOW BUILD
# Takes too long.
#shutit.send('oc describe buildconfig airflow',note='Ideally you would take this github url, and update your github webhooks for this project. But there is no public URL for this server so we will skip and trigger a build manually.')
#shutit.send('oc start-build airflow',note='Trigger a build by hand')
#shutit.send('sleep 60 && oc logs -f build/airflow-1',note='Follow the build and wait for it to terminate')
# IMAGE STREAM
shutit.send_file('/tmp/imagestream.json','''
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "airflow"
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
}''')
shutit.send('oc create -f /tmp/imagestream.json')
# BUILD CONFIG
shutit.send_file('secret.json','''{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mysecret"
},
"namespace": "user2",
"data": {
"username": "''' + base64.b64encode('myusername') + '''"
}
}''')
shutit.send('oc create -f secret.json')
shutit.send_file('/tmp/buildconfig.json','''
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "airflow",
"labels": {
"name": "airflow-build"
}
},
"spec": {
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/ianmiell/shutit-airflow"
}
},
"strategy": {
"type": "Docker"
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "airflow:latest"
}
},
"volumes": {
"name": "secvol",
"secret": {
"secretname": "mysecret"
}
}
}
}
''')
shutit.send('oc create -f /tmp/buildconfig.json')
# DEPLOYMENT CONFIG
shutit.send_file('/tmp/deploymentconfig.json','''
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "airflow"
},
"spec": {
"strategy": {
"type": "Rolling",
"rollingParams": {
"updatePeriodSeconds": 1,
"intervalSeconds": 1,
"timeoutSeconds": 120
},
"resources": {}
},
"triggers": [
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"nodejs-helloworld"
],
"from": {
"kind": "ImageStreamTag",
"name": "airflow:latest"
}
}
},
{
"type": "ConfigChange"
}
],
"replicas": 1,
"selector": {
"name":"airflow"
},
"template": {
"metadata": {
"labels": {
"name": "airflow"
}
},
"spec": {
"containers": [
{
"name": "airflow",
"image": "airflow",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent",
"securityContext": {
"capabilities": {},
"privileged": false
}
}
],
"restartPolicy": "Always",
"dnsPolicy": "ClusterFirst"
}
}
},
"status": {}
}
''')
shutit.send('oc create -f /tmp/deploymentconfig.json')
shutit.logout()
shutit.logout()
return True
def module():
return openshift_airflow(
'shutit.openshift_vm.openshift_vm.openshift_airflow', 1418326706.005,
description='',
maintainer='',
delivery_methods=['bash'],
depends=['shutit.openshift_vm.openshift_vm.openshift_vm']
)
|
ianmiell/shutit-openshift-vm
|
airflow.py
|
Python
|
mit
| 4,398
| 0.012051
|
from tkinter import *
from gui import GUI
from reminder import Reminder
import argparse
import time
if __name__ == '__main__':
print("""
Copyright (C) 2016 Logvinov Dima.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
""")
parser = argparse.ArgumentParser(description="PyReminder - python reminder app for ElementaryOS.")
parser.add_argument('--add', help="Add new event.\n"
"$ pyreminder --add 'Event text:Event time hours.minutes.day.month.year'", type=str)
parser.add_argument('--list', help="Print list of events.", action="store_true")
parser.add_argument('--delete', help="Delete event.\n "
"$ pyreminder --delete event_id ", type=int)
parser.add_argument('--gui', help="Run gui program.", action="store_true")
args = parser.parse_args()
reminder = Reminder()
if args.gui:
root = Tk()
root.geometry("500x200+350+500")
app = GUI(root, reminder)
root.mainloop()
if args.add:
event_text, event_date = args.add.split(":")
reminder.add_task(event_date, event_text)
reminder.update_db()
if args.list:
tasks = reminder.get_tasks_list()
if len(tasks) > 0:
for task_id in range(0, len(tasks)):
task_id = str(task_id)
print("id:{0} time:{1} text:{2}".
format(task_id, tasks[task_id][0], tasks[task_id][1]))
if args.delete:
if not reminder.delete_task(str(args.delete)):
print("Task: {} not found.".format(str(args.delete)))
|
vonivgol/pyreminder
|
src/main.py
|
Python
|
gpl-2.0
| 1,723
| 0.002902
|
"""
Saving and loading data or models
"""
from __future__ import print_function
from itertools import chain
import codecs
import copy
import csv
import json
import sys
import time
import traceback
import joblib
from sklearn.datasets import load_svmlight_file
from .edu import (EDU, FAKE_ROOT_ID, FAKE_ROOT)
from .table import (DataPack, DataPackException,
UNKNOWN, UNRELATED,
get_label_string, groupings)
from .util import truncate
# pylint: disable=too-few-public-methods
class IoException(Exception):
"""
Exceptions related to reading/writing data
"""
def __init__(self, msg):
super(IoException, self).__init__(msg)
# ---------------------------------------------------------------------
# feedback
# ---------------------------------------------------------------------
# pylint: disable=redefined-builtin, invalid-name
class Torpor(object):
"""
Announce that we're about to do something, then do it,
then say we're done.
Usage: ::
with Torpor("doing a slow thing"):
some_slow_thing
Output (1): ::
doing a slow thing...
Output (2a): ::
doing a slow thing... done
Output (2b): ::
doing a slow thing... ERROR
<stack trace>
:param quiet: True to skip the message altogether
"""
def __init__(self, msg,
sameline=True,
quiet=False,
file=sys.stderr):
self._msg = msg
self._file = file
self._sameline = sameline
self._quiet = quiet
self._start = 0
self._end = 0
def __enter__(self):
        # we grab the wall time instead of using time.clock() (A) because
        # we are not using this for profiling but just to get a rough idea
        # of what's going on, and (B) because we want to include things
        # like IO into the mix
self._start = time.time()
if self._quiet:
return
elif self._sameline:
print(self._msg, end="... ", file=self._file)
else:
print("[start]", self._msg, file=self._file)
def __exit__(self, type, value, tb):
self._end = time.time()
if tb is None:
if not self._quiet:
done = "done" if self._sameline else "[-end-] " + self._msg
ms_elapsed = 1000 * (self._end - self._start)
final_msg = u"{} [{:.0f} ms]".format(done, ms_elapsed)
print(final_msg, file=self._file)
else:
if not self._quiet:
oops = "ERROR!" if self._sameline else "ERROR! " + self._msg
print(oops, file=self._file)
traceback.print_exception(type, value, tb)
sys.exit(1)
# pylint: enable=redefined-builtin, invalid-name
# ---------------------------------------------------------------------
# tables
# ---------------------------------------------------------------------
def load_edus(edu_file):
"""
Read EDUs (see :doc:`../input`)
:rtype: [EDU]
.. _format: https://github.com/kowey/attelo/doc/inputs.rst
"""
def read_edu(row):
'interpret a single row'
expected_len = 6
if len(row) != expected_len:
oops = ('This row in the EDU file {efile} has {num} '
'elements instead of the expected {expected}: '
'{row}')
raise IoException(oops.format(efile=edu_file,
num=len(row),
expected=expected_len,
row=row))
[global_id, txt, grouping, subgrouping, start_str, end_str] = row
start = int(start_str)
end = int(end_str)
return EDU(global_id,
txt.decode('utf-8'),
start,
end,
grouping,
subgrouping)
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [read_edu(r) for r in reader if r]
def load_pairings(edu_file):
"""
Read and return EDU pairings (see :doc:`../input`).
We assume the order is parent, child
:rtype: [(string, string)]
.. _format: https://github.com/kowey/attelo/doc/inputs.rst
"""
def read_pair(row):
'interpret a single row'
if len(row) < 2 or len(row) > 3:
oops = ('This row in the pairings file {efile} has '
'{num} elements instead of the expected 2 or 3')
raise IoException(oops.format(efile=edu_file,
num=len(row),
row=row))
return tuple(row[:2])
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [read_pair(r) for r in reader if r]
def load_labels(feature_file):
"""
Read the very top of a feature file and read the labels comment,
return the sequence of labels, else return None
:rtype: [string] or None
"""
with codecs.open(feature_file, 'r', 'utf-8') as stream:
line = stream.readline()
if line.startswith('#'):
seq = line[1:].split()
if seq[0] == 'labels:':
return seq[1:]
# fall-through case, no labels found
return None
def _process_edu_links(edus, pairings):
"""
Convert from the results of :py:method:load_edus: and
:py:method:load_pairings: to a sequence of edus and pairings
respectively
:rtype: ([EDU], [(EDU,EDU)])
"""
edumap = {e.id: e for e in edus}
enames = frozenset(chain.from_iterable(pairings))
if FAKE_ROOT_ID in enames:
edus2 = [FAKE_ROOT] + edus
edumap[FAKE_ROOT_ID] = FAKE_ROOT
else:
edus2 = copy.copy(edus)
naughty = [x for x in enames if x not in edumap]
if naughty:
oops = ('The pairings file mentions the following EDUs but the EDU '
'file does not actually include EDUs to go with them: {}')
raise DataPackException(oops.format(truncate(', '.join(naughty),
1000)))
pairings2 = [(edumap[e1], edumap[e2]) for e1, e2 in pairings]
return edus2, pairings2
def load_multipack(edu_file, pairings_file, feature_file, vocab_file,
verbose=False):
"""
Read EDUs and features for edu pairs.
Perform some basic sanity checks, raising
:py:class:`IoException` if they should fail
:rtype: :py:class:`Multipack` or None
"""
vocab = load_vocab(vocab_file)
with Torpor("Reading edus and pairings", quiet=not verbose):
edus, pairings = _process_edu_links(load_edus(edu_file),
load_pairings(pairings_file))
with Torpor("Reading features", quiet=not verbose):
labels = [UNKNOWN] + load_labels(feature_file)
# pylint: disable=unbalanced-tuple-unpacking
data, targets = load_svmlight_file(feature_file,
n_features=len(vocab))
# pylint: enable=unbalanced-tuple-unpacking
with Torpor("Build data packs", quiet=not verbose):
dpack = DataPack.load(edus, pairings, data, targets,
labels, vocab)
return {k: dpack.selected(idxs) for
k, idxs in groupings(pairings).items()}
def load_vocab(filename):
"read feature vocabulary"
features = []
with codecs.open(filename, 'r', 'utf-8') as stream:
for line in stream:
features.append(line.split('\t')[0])
return features
# ---------------------------------------------------------------------
# predictions
# ---------------------------------------------------------------------
def write_predictions_output(dpack, predicted, filename):
"""
Write predictions to an output file whose format
is documented in :doc:`../output`
"""
links = {}
for edu1, edu2, label in predicted:
links[(edu1, edu2)] = label
def mk_row(edu1, edu2):
'return a list of columns'
edu1_id = edu1.id
edu2_id = edu2.id
row = [edu1_id,
edu2_id,
links.get((edu1_id, edu2_id), UNRELATED)]
return [x.encode('utf-8') for x in row]
with open(filename, 'wb') as fout:
writer = csv.writer(fout, dialect=csv.excel_tab)
# by convention the zeroth edu is the root node
for edu1, edu2 in dpack.pairings:
writer.writerow(mk_row(edu1, edu2))
def load_predictions(edu_file):
"""
Read back predictions (see :doc:`../output`), returning a list
of triples: parent id, child id, relation label (or 'UNRELATED')
:rtype: [(string, string, string)]
"""
def mk_pair(row):
'interpret a single row'
expected_len = 3
if len(row) < expected_len:
oops = ('This row in the predictions file {efile} has {num} '
'elements instead of the expected {expected}: '
'{row}')
raise IoException(oops.format(efile=edu_file,
num=len(row),
expected=expected_len,
row=row))
return tuple(x.decode('utf-8') for x in row)
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [mk_pair(r) for r in reader if r]
def load_gold_predictions(pairings_file, feature_file, verbose=False):
"""
Load a pairings and feature file as though it were a set of
predictions
:rtype: [(string, string, string)]
"""
pairings = load_pairings(pairings_file)
with Torpor("Reading features", quiet=not verbose):
labels = load_labels(feature_file)
# pylint: disable=unbalanced-tuple-unpacking
_, targets = load_svmlight_file(feature_file)
# pylint: enable=unbalanced-tuple-unpacking
return [(x1, x2, get_label_string(labels, t))
for ((x1, x2), t) in zip(pairings, targets)]
# ---------------------------------------------------------------------
# models
# ---------------------------------------------------------------------
def load_model(filename):
"""
Load model into memory from file.
Note that we consider the filename 'oracle' to be special.
Instead of loading a model, we simply return the virtual
oracle decoder
Returns
-------
model: object
some sort of classifier (eg, an attelo.learn.AttachClassifier
or an attelo.learn.LabelClassifier)
"""
return joblib.load(filename)
def save_model(filename, model):
"""
Dump model into a file
"""
joblib.dump(model, filename)
# ---------------------------------------------------------------------
# folds
# ---------------------------------------------------------------------
def load_fold_dict(filename):
"""
Load fold dictionary into memory from file
"""
with open(filename, 'r') as stream:
return json.load(stream)
def save_fold_dict(fold_dict, filename):
"""
Dump fold dictionary to a file
"""
with open(filename, 'w') as stream:
json.dump(fold_dict, stream, indent=2)
|
kowey/attelo
|
attelo/io.py
|
Python
|
gpl-3.0
| 11,402
| 0
|
def printMap(the_map,note):
print(note)
for row in the_map:
row_str = ""
for cell in row:
row_str += " {0:3d}".format(cell)
print(row_str)
def pathFinder(x, y, the_map, steps, lastX, lastY, wall):
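    # Overview (added annotation): recursive flood fill from (x, y). Each neighbouring
    # cell reachable in fewer steps than previously recorded gets the current step
    # count. At most one wall (cell value 1) may be removed along a path; a removed
    # wall is marked with a negative step count so it can be distinguished later.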
# count possible moves
debug = False
options = []
if x-1 >= 0: # East
options.append([-1, 0])
if x+1 <= lastX: # West
options.append([ 1, 0])
if y-1 >= 0: # North
options.append([ 0,-1])
if y+1 <= lastY: # South
options.append([ 0, 1])
# increment step
steps += 1
if debug:
printMap(the_map,"({0:2d},{1:2d}) steps:{2:3d} {3:6} before options ---------------------------------".format(x,y,steps,wall))
for option in options:
# new x and y
newX = x + option[0]
# print("x({0:2d}) + option[0]({1:2d}) -> newX({2:2d})".format(x,option[0],newX) )
newY = y + option[1]
# print("y({0:2d}) + option[1]({1:2d}) -> newY({2:2d})".format(y,option[1],newY) )
if debug:
print(" looking at ({0:2d},{1:2d}) with value={2:2d} and with steps:{3:3d} {4:6} from ({5:2d},{6:2d})".format(newX,newY,the_map[newY][newX],steps,wall,x,y))
# if statements
if the_map[newY][newX] == 0:
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
elif the_map[newY][newX] > 1 and steps <= the_map[newY][newX]:
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
elif ( the_map[newY][newX] == 1 or the_map[newY][newX] < 0 ) and not wall and (newX != lastX or newY != lastY):
if debug:
print("Removing a wall at {0:2d}:{1:2d}".format(newX,newY))
wall = True
the_map[newY][newX] = steps * -1
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
wall = False
elif the_map[newY][newX] > 1 and steps < abs(the_map[newY][newX]):
if(the_map[newY][newX] < 0):
the_map[newY][newX] = steps * -1
if(the_map[newY][newX] > 0):
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
if debug:
printMap(the_map,"({0:2d},{1:2d}) steps:{2:3d} {3:6} after options ---------------------------------".format(x,y,steps,wall))
def solution(the_map):
debug = False
steps = 1
lastX = len(the_map[0]) - 1
lastY = len(the_map) - 1
x = lastX
y = lastY
testMap = the_map[:]
testMap[y][x] = 1
pathFinder(x, y, testMap, steps, lastX, lastY, False)
if debug:
printMap(the_map,"All done. {0:3d} ------------------------------".format(testMap[0][0]))
return(testMap[0][0])
#print(solution([[0, 1], [0, 0]]))
#print(solution([[0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]))
print(solution([[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))
#print(solution([[0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))
|
perlygatekeeper/glowing-robot
|
google_test/bunny_escape/bunnyEscape_fixed.py
|
Python
|
artistic-2.0
| 3,411
| 0.013193
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import Ice, sys, math, traceback
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class C(QWidget):
def __init__(self, endpoint, modules):
QWidget.__init__(self)
self.ic = Ice.initialize(sys.argv)
self.mods = modules
print ('Endpoint', )
self.prx = self.ic.stringToProxy(endpoint)
self.proxy = self.mods['RoboCompRoimant'].RoimantPrx.checkedCast(self.prx)
self.leftPyrList = []
self.rightPyrList = []
for level in range(4):
self.leftPyrList.append(None)
self.rightPyrList.append(None)
self.wdth = self.proxy.getRoiParams().width
self.hght = self.proxy.getRoiParams().height
self.job()
def job(self):
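		# Annotation: one RPC returns both image pyramids as flat RGB byte buffers;
		# the loop below slices out 4 pyramid levels, each holding a quarter of the
		# bytes of the previous level (hence size is divided by 4 every iteration).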
output = self.proxy.getBothPyramidsRGBAndLeftROIList()
pos=0
size=self.wdth*self.hght*3
for level in range(4):
self.leftPyrList[level] = output[0][pos:pos+size]
self.rightPyrList[level] = output[2][pos:pos+size]
pos = pos + size
size = size/4
def paintEvent(self, event=None):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
xPos = -self.wdth/2
yPos = self.height()
for level in range(len(self.leftPyrList)):
xPos = xPos + (self.wdth/2)/(2**level)
yPos = yPos - self.hght/(2**level)
qimage = QImage(self.leftPyrList[level], self.wdth/(2**level), self.hght/(2**level), QImage.Format_RGB888);
painter.drawImage(QPointF(xPos, yPos), qimage)
qimage = QImage(self.rightPyrList[level], self.wdth/(2**level), self.hght/(2**level), QImage.Format_RGB888);
painter.drawImage(QPointF(xPos+self.wdth, yPos), qimage)
painter.end()
painter = None
|
robocomp/robocomp
|
tools/rcmonitor/examples/pyramidRoiRGB.py
|
Python
|
gpl-3.0
| 2,369
| 0.01984
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from functools import partialmethod
from pymongo.operations import UpdateOne, InsertOne
from .cache import CachedModel
from .errors import ConfigError, ArgumentError
from .metatype import DocumentType, EmbeddedDocumentType
from .fields import EmbeddedField
log = logging.getLogger('yamo')
class classproperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
class MongoOperationMixin(object):
""" Mongodb raw operations """
@classmethod
def run_command(cls, *args, **kwargs):
cmd = kwargs['cmd']
del kwargs['cmd']
return getattr(cls._coll, cmd)(*args, **kwargs)
for cmd in [
'insert_one', 'insert_many',
'find', 'find_one', 'find_one_and_delete',
'find_one_and_replace', 'find_one_and_update',
'update_one', 'update_many', 'replace_one',
'delete_one', 'delete_many',
'create_index', 'create_indexes', 'reindex',
'index_information', 'list_indexes',
'drop', 'drop_index', 'drop_indexes',
'aggregate', 'group', 'inline_map_reduce', 'map_reduce',
'bulk_write',
'initialize_ordered_bulk_op', 'initialize_unordered_bulk_op',
'rename', 'count', 'distinct', 'options', 'with_options',
]:
locals()[cmd] = partialmethod(run_command, cmd=cmd)
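    # The loop above installs one pass-through method per listed command, so e.g.
    # Model.find_one({'x': 1}) simply forwards to cls._coll.find_one({'x': 1}).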
class InitMixin(object):
def __init__(self, data=None):
self._refs = {}
self._data = {}
self._defaults = {}
if data:
for name, field in self._fields.items():
if name in data:
value = data[name]
else:
value = field.default
if callable(value):
value = value()
if value is not None:
self._defaults[name] = value
value = field.to_storage(value)
self._data[name] = value
class ValidationMixin(object):
def validate(self):
for name, field in self._fields.items():
if name in self._data:
value = field.to_python(self._data[name])
field.validate(value)
def to_dict(self):
d = {}
for name, field in self._fields.items():
value = field.to_python(self._data.get(name))
if isinstance(value, list):
ovalue, value = value, []
for v in ovalue:
if isinstance(v, EmbeddedDocument):
v = v.to_dict()
value.append(v)
d[name] = value
return d
class MetaMixin(object):
""" helper methods for "Meta" info """
@classproperty
def unique_fields(cls):
names = set()
for idx in cls.Meta._indexes or []:
if idx.kwargs.get('unique'):
for key in idx.keys:
if isinstance(key, tuple):
names.add(key[0])
else:
names.add(key)
return names
@classmethod
def prepare(cls):
cls.ensure_indexes()
cls.ensure_shards()
@classmethod
def ensure_indexes(cls):
allowed_keys = set(['name', 'unique', 'background', 'sparse',
'bucketSize', 'min', 'max', 'expireAfterSeconds'])
for idx in cls.Meta._indexes or []:
if set(idx.kwargs.keys()) - allowed_keys:
raise ArgumentError(MetaMixin.ensure_indexes, idx.kwargs)
cls._coll.create_index(idx.keys, **idx.kwargs)
@classmethod
def ensure_shards(cls):
if cls.Meta._shardkey:
admin = cls._conn.admin
dbname = cls._db.name
try:
admin.command('enableSharding', dbname)
except Exception as e:
                if 'already' in str(e):
try:
admin.command(
'shardCollection',
'{}.{}'.format(dbname,
cls.Meta.__collection__),
key=cls.Meta._shardkey.key)
except Exception as e:
                        if 'already' not in str(e):
log.warning('shard collection failed: '
'{}'.format(str(e)))
else:
log.warning('enable shard failed: '
'{}'.format(str(e)))
class MapperMixin(object):
""" ORM only method mixins """
def refresh(self):
_id = self._data.get('_id')
self._data = {}
if _id:
doc = self._coll.find_one({'_id': _id})
if doc:
self._data = doc
self.validate()
@classmethod
def query(cls, *args, **kwargs):
""" Same as collection.find, but return Document then dict """
for doc in cls._coll.find(*args, **kwargs):
yield cls.from_storage(doc)
@classmethod
def query_one(cls, *args, **kwargs):
""" Same as collection.find_one, but return Document then dict """
doc = cls._coll.find_one(*args, **kwargs)
if doc:
return cls.from_storage(doc)
def update(self, update):
""" Update self """
self._coll.update_one({'_id': self._data['_id']},
update)
def upsert(self, null=False):
""" Insert or Update Document
:param null: whether update null values
Wisely select unique field values as filter,
Update with upsert=True
"""
self._pre_save()
self.validate()
filter_ = self._upsert_filter()
if filter_:
update = self._upsert_update(filter_, null)
if update['$set']:
r = self._coll.find_one_and_update(filter_, update,
upsert=True, new=True)
self._data['_id'] = r['_id']
else:
r = self._coll.insert_one(self._data)
self._data['_id'] = r.inserted_id
def save(self):
self._pre_save()
self._ensure_id()
self.validate()
if '_id' in self._data:
doc = self._data.copy()
del doc['_id']
self._coll.update_one({'_id': self._data['_id']},
{'$set': doc},
upsert=True)
else:
self._coll.insert_one(self._data)
@classmethod
def bulk_upsert(cls, docs, null=False):
if len(docs) == 0:
return 0
requests = []
for doc in docs:
if not isinstance(doc, cls):
raise ArgumentError(cls, docs)
doc._pre_save()
doc.validate()
filter_ = doc._upsert_filter()
if filter_:
update = doc._upsert_update(filter_, null)
if update['$set']:
requests.append(UpdateOne(filter_, update, upsert=True))
else:
requests.append(InsertOne(doc._data))
r = cls._coll.bulk_write(requests, ordered=False)
return r.upserted_count
def remove(self):
_id = self._ensure_id()
if _id:
self._coll.delete_one({'_id': _id})
else:
log.warning("This document has no _id, it can't be deleted")
@classmethod
def cached(cls, timeout=60, cache_none=False):
""" Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...})
"""
return CachedModel(cls=cls, timeout=timeout, cache_none=cache_none)
def _pre_save(self):
for name, field in self._fields.items():
value = field.pre_save_val(self._data.get(name))
if value:
setattr(self, name, value)
if not field.required and name in self._data \
and self._data[name] is None:
del self._data[name]
def _upsert_filter(self):
filter_ = {}
if self._ensure_id():
filter_['_id'] = self._data['_id']
for name in self.unique_fields:
value = self._data.get(name)
if value:
filter_[name] = value
return filter_
def _upsert_update(self, filter_, null=False):
to_update = {}
to_insert = {}
for key, value in self._data.items():
if key not in filter_ and (null or value is not None):
if self._defaults.get(key) == value:
                    # default values should only be applied if this is an insert
to_insert[key] = value
else:
to_update[key] = value
update = {'$set': to_update}
if to_insert:
update['$setOnInsert'] = to_insert
return update
def _ensure_id(self):
_id = self._data.get('_id')
if not _id and self.Meta._formatter:
try:
_id = self.Meta._formatter._format(**self._data)
except KeyError:
pass
else:
self._data['_id'] = _id
return _id
class EmbeddedDocument(InitMixin, ValidationMixin,
metaclass=EmbeddedDocumentType):
pass
class Document(InitMixin, ValidationMixin, MetaMixin, MapperMixin, MongoOperationMixin,
metaclass=DocumentType):
@classmethod
def from_storage(cls, data):
instance = cls()
instance._data = data
# create reference to embedded values
for key, value in instance._fields.items():
if isinstance(value, EmbeddedField):
instance._refs[key] = value.to_python(data[key])
return instance
@classproperty
def _db(cls):
raise ConfigError('Database not registered, did you run '
'conn.register_all()?')
@classproperty
def _coll(cls):
return cls._db[cls.Meta.__collection__]
def _get_db(self):
return self._db
def _get_coll(self):
return self._coll
|
observerss/yamo
|
yamo/document.py
|
Python
|
mit
| 10,377
| 0.000096
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_cancel_data_labeling_job_sample]
from google.cloud import aiplatform
def cancel_data_labeling_job_sample(
project: str,
data_labeling_job_id: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
name = client.data_labeling_job_path(
project=project, location=location, data_labeling_job=data_labeling_job_id
)
response = client.cancel_data_labeling_job(name=name)
print("response:", response)
# [END aiplatform_cancel_data_labeling_job_sample]
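# Hypothetical invocation (both identifiers below are placeholders, not real resources):
#   cancel_data_labeling_job_sample(project="my-project", data_labeling_job_id="1234567890")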
|
sasha-gitg/python-aiplatform
|
samples/snippets/job_service/cancel_data_labeling_job_sample.py
|
Python
|
apache-2.0
| 1,485
| 0.001347
|
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
import copy
from PIL import Image
import os
import random
import time
import math
imagesize = (120, 120)
peak = 100
gusti = ["margherita", "crudo", "funghi", "salame", "rucola", "4formaggi", "americana"]
def buildnet():
inputs = len(gusti)
outputs = imagesize[0] * imagesize[1] * 3 # R G B
hiddens = (120 * 3) # lol, I have no idea
return buildNetwork(inputs, hiddens, outputs)
def getSwitchTuple(index, lengt, disturb=0):
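    # Builds a one-hot style input vector: the entry at `index` is set to `peak`
    # (optionally perturbed by `disturb`), every other entry stays at 0.0.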
ret = []
for i in range(lengt):
if i == index:
ret.append((1.0 + disturb) * peak)
else:
ret.append(0.0)
return tuple(ret)
def buildtrainset():
inputs = len(gusti)
outputs = imagesize[0] * imagesize[1] * 3
ds = SupervisedDataSet(inputs, outputs)
for gusto in gusti:
indice = gusti.index(gusto)
pizzaset = os.listdir("./pizze/" + gusto + "/")
print("Training set for gusto: %s (%s)" % (gusto, ",".join(map(str, getSwitchTuple(indice, inputs)))))
for pizzaname in pizzaset:
pizza = "./pizze/" + gusto + "/" + pizzaname
print(" Training with %s" % pizza, end=" ")
ds.addSample(getSwitchTuple(indice, inputs, disturb=random.uniform(-0.3, 0.3)), processImg(pizza))
print("done")
return ds
def outimage(outtuple, name):
img = Image.new('RGB', imagesize, "white")
pixels = img.load()
for i in range(img.size[0]):
for j in range(img.size[1]):
tup_index = (i*img.size[0] + j) * 3
pixels[i,j] = (int(outtuple[tup_index]), int(outtuple[tup_index + 1]), int(outtuple[tup_index + 2]))
img.save(name)
#img.show()
def calcETA(timestep, remaining):
totsec = timestep * remaining
totmin = math.floor(totsec / 60)
remsec = totsec - (totmin * 60)
return totmin, remsec
def letsrock(rounds=25):
minimum = 999999999999
bestnet = None
print("Initializing neural network...")
net = buildnet()
print("Building training set...")
trset = buildtrainset()
trainer = BackpropTrainer(net, trset)
started = time.time()
for i in range(rounds):
print("training: %d%%... " % ((i*100) / rounds), end="")
err = trainer.train()
timestep = (time.time() - started) / (i+1)
min, sec = calcETA(timestep, rounds - i - 1)
if err < minimum:
minimum = err
bestnet = copy.deepcopy(net)
print("error: %.05f - ETA: %02d:%02d" % (err, min, sec), end="\r")
#trainer.trainUntilConvergence(verbose=True)
print("training: complete! ")
return bestnet
def fullShow():
net = letsrock()
for gusto in gusti:
print("Creating pizza, gusto: %s" % gusto)
indice = gusti.index(gusto)
activ = getSwitchTuple(indice, len(gusti))
name = "oven/" + gusto + ".jpg"
rgb = net.activate(activ)
datum = list(rgb)
outimage(datum, name)
def processImg(filename):
img = Image.open(filename)
img = img.resize(imagesize, Image.ANTIALIAS)
rgb_img = img.convert('RGB')
pixels = []
for x in range(imagesize[0]):
for y in range(imagesize[1]):
tup = tuple(rgb_img.getpixel((x, y)))
pixels.extend(tup)
return tuple(pixels)
if __name__ == "__main__":
fullShow()
|
agentOfChaos/brainPizza
|
brainpizza.py
|
Python
|
gpl-2.0
| 3,474
| 0.004893
|
from allauth.socialaccount.providers.base import AuthAction, ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class Scope(object):
ACCESS = 'read-only'
class YNABAccount(ProviderAccount):
pass
class YNABProvider(OAuth2Provider):
id = 'ynab'
name = 'YNAB'
account_class = YNABAccount
def get_default_scope(self):
scope = [Scope.ACCESS]
return scope
def get_auth_params(self, request, action):
ret = super(YNABProvider, self).get_auth_params(request,
action)
if action == AuthAction.REAUTHENTICATE:
ret['prompt'] = 'select_account consent'
return ret
def extract_uid(self, data):
return str(data['data']['user']['id'])
provider_classes = [YNABProvider]
|
lukeburden/django-allauth
|
allauth/socialaccount/providers/ynab/provider.py
|
Python
|
mit
| 852
| 0
|
import sys
from healthcareai.common.healthcareai_error import HealthcareAIError
def validate_pyodbc_is_loaded():
""" Simple check that alerts user if they are do not have pyodbc installed, which is not a requirement. """
if 'pyodbc' not in sys.modules:
raise HealthcareAIError('Using this function requires installation of pyodbc.')
def validate_sqlite3_is_loaded():
""" Simple check that alerts user if they are do not have sqlite installed, which is not a requirement. """
if 'sqlite3' not in sys.modules:
raise HealthcareAIError('Using this function requires installation of sqlite3.')
|
HealthCatalystSLC/healthcareai-py
|
healthcareai/common/database_library_validators.py
|
Python
|
mit
| 626
| 0.00639
|
"""
Test basic DataFrame functionality.
"""
import pandas as pd
import pytest
import weld.grizzly as gr
def get_frames(cls, strings):
"""
Returns two DataFrames for testing binary operators.
The DataFrames have columns of overlapping/different names, types, etc.
"""
df1 = pd.DataFrame({
'name': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'Smith', 'Narayanan', 'Thomas', 'Thaker'],
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = pd.DataFrame({
'firstName': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'smith', 'narayanan', 'Thomas', 'thaker'],
'age': [25, 30, 45, 20, 60, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
if not strings:
df1 = df1.drop(['name', 'lastName'], axis=1)
df2 = df2.drop(['firstName', 'lastName'], axis=1)
return (cls(df1), cls(df2))
def _test_binop(pd_op, gr_op, strings=True):
"""
Test a binary operator.
Binary operators align on column name. For columns that don't exist in both
DataFrames, the column is filled with NaN (for non-comparison operations) and
or False (for comparison operations).
If the RHS is a Series, the Series should be added to all columns.
"""
df1, df2 = get_frames(pd.DataFrame, strings)
gdf1, gdf2 = get_frames(gr.GrizzlyDataFrame, strings)
expect = pd_op(df1, df2)
result = gr_op(gdf1, gdf2).to_pandas()
assert expect.equals(result)
def test_evaluation():
    # Test to make sure that evaluating a DataFrame once caches the result and
    # doesn't cause another evaluation.
df1 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df3 = (df1 + df2) * df2 + df1 / df2
assert not df3.is_value
df3.evaluate()
assert df3.is_value
weld_value = df3.weld_value
df3.evaluate()
# The same weld_value should be returned.
assert weld_value is df3.weld_value
def test_add():
_test_binop(pd.DataFrame.add, gr.GrizzlyDataFrame.add, strings=False)
def test_sub():
_test_binop(pd.DataFrame.sub, gr.GrizzlyDataFrame.sub, strings=False)
def test_mul():
_test_binop(pd.DataFrame.mul, gr.GrizzlyDataFrame.mul, strings=False)
def test_div():
_test_binop(pd.DataFrame.div, gr.GrizzlyDataFrame.div, strings=False)
def test_eq():
_test_binop(pd.DataFrame.eq, gr.GrizzlyDataFrame.eq, strings=True)
def test_ne():
_test_binop(pd.DataFrame.ne, gr.GrizzlyDataFrame.ne, strings=True)
def test_le():
_test_binop(pd.DataFrame.le, gr.GrizzlyDataFrame.le, strings=False)
def test_lt():
_test_binop(pd.DataFrame.lt, gr.GrizzlyDataFrame.lt, strings=False)
def test_ge():
_test_binop(pd.DataFrame.ge, gr.GrizzlyDataFrame.ge, strings=False)
def test_gt():
_test_binop(pd.DataFrame.gt, gr.GrizzlyDataFrame.gt, strings=False)
|
weld-project/weld
|
weld-python/tests/grizzly/core/test_frame.py
|
Python
|
bsd-3-clause
| 3,167
| 0.005052
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
from casadi.tools import *
import casadi as c
from numpy import *
import unittest
from types import *
from helpers import *
class SDPtests(casadiTestCase):
@requires("DSDPSolver")
def test_memleak1(self):
self.message("memleak1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
A = vertcat([DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])])
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
@requires("DSDPSolver")
def test_memleak2(self):
self.message("memleak1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
A = vertcat([DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])])
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
@requires("DSDPSolver")
def test_scalar(self):
self.message("scalar")
#
# min n1*x
# x
# n3*x-n2>=0
#
# -> x = n2/n3
#
    # 1 active constraint, cost: d(n1*x)/d(n3*x-n2) = 1/[d(n3*x-n2)/d(n1*x)] = n1/n3
n1 = 3.1
n2 = 2.3
n3 = 4.7
b = DMatrix(n1)
Ai = [DMatrix(n3)]
A = vertcat(Ai)
makeSparse(A)
C = DMatrix(n2)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("primal"),DMatrix(n2/n3),digits=5)
self.checkarray(dsp.output("p"),DMatrix(0),digits=5)
self.checkarray(dsp.output("dual"),DMatrix(n1/n3),digits=5)
@requires("DSDPSolver")
def test_linear_equality(self):
self.message("linear equality")
# min n1*x
# x
#
# n3*x-n2 >= 0 |__ n3*x == n2
# -(n3*x-n2) >= 0 |
#
# solution: x=n2/n3
n3 = 1.7
n1 = 2.1
n2 = 1.3
b = DMatrix([n1])
Ai = [ blkdiag([n3,-n3])]
C = blkdiag([n2,-n2])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("primal"),DMatrix(n2/n3),digits=5)
self.checkarray(dsp.output("p"),DMatrix.zeros(2,2),digits=5)
self.checkarray(dsp.output("dual")[0,0]-dsp.output("dual")[1,1],DMatrix(n1/n3),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation1(self):
self.message("linear interpolation1")
# min 2*x0 + x1*3
# x0,x1
# x0+x1 - 1 >=0 --> x0+x1>=1
# x0 >=0
# x1 >=0
#
# solution: x0=1, x1=0
b = DMatrix([2,3])
Ai = [ blkdiag([1,1,0]), blkdiag([1,0,1])]
C = blkdiag([1,0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix([[0,0,0],[0,1,0],[0,0,0]]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[2,0,0],[0,0,0],[0,0,1]]),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation2(self):
self.message("linear interpolation2")
# min 2*x0 + 3*x1
# x0,x1
# -(x0 + x1 -1) >=0 --> x0 + x1 <= 1
# x0 >=0
# x1 >=0
#
# solution: x0=0 , x1=0
b = DMatrix([2,3])
Ai = [ blkdiag([-1,1,0]), blkdiag([-1,0,1])]
C = blkdiag([-1,0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(0),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(0),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([0,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix([[1,0,0],[0,0,0],[0,0,0]]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[0,0,0],[0,2,0],[0,0,3]]),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation(self):
self.message("linear interpolation")
# min 2*a + (1-a)*4
# a
# 0 <= a <= 1
#
# Translates to:
# min 2*x0 + 4*x1
# x0,x1
# x0 + x1 -1 >= 0 |__ x0 + x1 == 1
# -(x0 + x1 -1) >= 0 |
# x0 >= 0
# x1 >= 0
b = DMatrix([2,4])
Ai = [ blkdiag([1,-1,1,0]), blkdiag([1,-1,0,1])]
e = 1e-6
C = blkdiag([1,-(1+e),0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1,0]),digits=5)
self.checkarray(dsp.output("p"),diag([0,0,1,0]),digits=5)
self.checkarray(dsp.output("dual"),diag([2,0,0,2]),digits=2)
@requires("DSDPSolver")
def test_example1(self):
self.message("Example1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
Ai = [DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])]
A = vertcat(Ai)
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(-41.9),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(-41.9),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([-1.1,-2.7375,-0.55]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[5.9,-1.375],[-1.375,1]]),digits=5)
self.checkarray(dsp.output("p"),DMatrix.zeros(2,2),digits=5)
V = struct_ssym([
entry("L",shape=C.shape),
entry("x",shape=b.size())
])
L = V["L"]
x = V["x"]
P = mul(L,L.T)
g = []
g.append(sum([Ai[i]*x[i] for i in range(3)]) - C - P)
f = SXFunction([V],[mul(b.T,x)])
g = SXFunction([V],[veccat(g)])
sol = IpoptSolver(f,g)
sol.init()
sol.setInput(0,"lbg")
sol.setInput(0,"ubg")
sol.setInput(1,"x0")
sol.evaluate()
sol_ = V(sol.output())
self.checkarray(sol_["x"],DMatrix([-1.1,-2.7375,-0.55]),digits=5)
@requires("DSDPSolver")
def test_example2(self):
self.message("Example2")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([1.1, -10, 6.6 , 19 , 4.1])
C = blkdiag([DMatrix([[-1.4,-3.2],[-3.2,-28]]),DMatrix([[15,-12,2.1],[-12,16,-3.8],[2.1,-3.8,15]]),1.8,-4.0]);
sp = C.sparsity()
flatdata = [[0.5,5.2,5.2,-5.3,7.8,-2.4,6.0,-2.4,4.2,6.5,6.0,6.5,2.1,-4.5,-3.5],
[1.7,7.0,7.0,-9.3,-1.9,-0.9,-1.3,-0.9,-0.8,-2.1,-1.3,-2.1,4.0,-0.2,-3.7],
[6.3,-7.5,-7.5,-3.3,0.2,8.8,5.4,8.8,3.4,-0.4,5.4,-0.4,7.5,-3.3,-4.0],
[-2.4,-2.5,-2.5,-2.9,3.4,-3.2,-4.5,-3.2,3.0,-4.8,-4.5,-4.8,3.6,4.8,9.7],
[-6.5,-5.4,-5.4,-6.6,6.7,-7.2,-3.6,-7.2,7.3,-3.0,-3.6,-3.0,-1.4,6.1,-1.5]]
A = vertcat([DMatrix(sp,data) for data in flatdata])
makeSparse(A)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
DMatrix.setPrecision(10)
self.checkarray(dsp.output("primal_cost"),DMatrix(3.20626934048e1),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(3.20626923535e1),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1.551644595,0.6709672545,0.9814916693,1.406569511,0.9421687787]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix(sp,[2.640261206,0.5605636589,0.5605636589,3.717637107,0.7615505416,-1.513524657,1.139370202,-1.513524657,3.008016978,-2.264413045,1.139370202,-2.264413045,1.704633559,0,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix(sp,[0,0,0,0,7.119155551,5.024671489,1.916294752,5.024671489,4.414745792,2.506021978,1.916294752,2.506021978,2.048124139,0.3432465654,4.391169489]),digits=5)
@requires("DSDPSolver")
def test_example2_perm(self):
self.message("Example2_permuted")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([1.1, -10, 6.6 , 19 , 4.1])
perm = [5,2,1,0,6,3,4]
permi = lookupvector(perm,len(perm))
C = blkdiag([DMatrix([[-1.4,-3.2],[-3.2,-28]]),DMatrix([[15,-12,2.1],[-12,16,-3.8],[2.1,-3.8,15]]),1.8,-4.0]);
sp = C.sparsity()
flatdata = [[0.5,5.2,5.2,-5.3,7.8,-2.4,6.0,-2.4,4.2,6.5,6.0,6.5,2.1,-4.5,-3.5],
[1.7,7.0,7.0,-9.3,-1.9,-0.9,-1.3,-0.9,-0.8,-2.1,-1.3,-2.1,4.0,-0.2,-3.7],
[6.3,-7.5,-7.5,-3.3,0.2,8.8,5.4,8.8,3.4,-0.4,5.4,-0.4,7.5,-3.3,-4.0],
[-2.4,-2.5,-2.5,-2.9,3.4,-3.2,-4.5,-3.2,3.0,-4.8,-4.5,-4.8,3.6,4.8,9.7],
[-6.5,-5.4,-5.4,-6.6,6.7,-7.2,-3.6,-7.2,7.3,-3.0,-3.6,-3.0,-1.4,6.1,-1.5]]
A = vertcat([DMatrix(sp,data)[perm,perm] for data in flatdata])
makeSparse(A)
C = C[perm,perm]
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
DMatrix.setPrecision(10)
self.checkarray(dsp.output("primal_cost"),DMatrix(3.20626934048e1),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(3.20626923535e1),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1.551644595,0.6709672545,0.9814916693,1.406569511,0.9421687787]),digits=5)
self.checkarray(dsp.output("dual")[permi,permi],DMatrix(sp,[2.640261206,0.5605636589,0.5605636589,3.717637107,0.7615505416,-1.513524657,1.139370202,-1.513524657,3.008016978,-2.264413045,1.139370202,-2.264413045,1.704633559,0,0]),digits=5)
self.checkarray(dsp.output("p")[permi,permi],DMatrix(sp,[0,0,0,0,7.119155551,5.024671489,1.916294752,5.024671489,4.414745792,2.506021978,1.916294752,2.506021978,2.048124139,0.3432465654,4.391169489]),digits=5)
if __name__ == '__main__':
unittest.main()
|
jgillis/casadi
|
test/python/sdp.py
|
Python
|
lgpl-3.0
| 12,021
| 0.049746
|
#! /usr/bin/env python
class ParserError(Exception):
pass
class Sentence(object):
def __init__(self, subject, verb, object):
# remember we take ('noun', 'princess') tuples and convert them
self.subject = subject[1]
self.verb = verb[1]
self.object = object[1]
def get_sentence(self):
self.sentence = ' '.join([self.subject, self.verb, self.object])
return self.sentence
def peek(word_list):
if word_list:
word = word_list[0]
return word[0]
else:
return None
def match(word_list, expecting):
if word_list:
word = word_list.pop(0)
if word[0] == expecting:
return word
else:
return None
else:
return None
def skip(word_list, word_type):
while peek(word_list) == word_type:
match(word_list, word_type)
def parse_verb(word_list):
skip(word_list, 'stop')
if peek(word_list) == 'verb':
return match(word_list, 'verb')
else:
raise ParserError("Expected a verb next.")
def parse_object(word_list):
skip(word_list, 'stop')
next = peek(word_list)
if next == 'noun':
return match(word_list, 'noun')
elif next == 'direction':
return match(word_list, 'direction')
else:
raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list, subj):
verb = parse_verb(word_list)
obj = parse_object(word_list)
return Sentence(subj, verb, obj)
def parse_sentence(word_list):
skip(word_list, 'stop')
start = peek(word_list)
if start == 'noun':
subj = match(word_list, 'noun')
return parse_subject(word_list, subj)
elif start == 'verb':
# assume the subject is the player then
return parse_subject(word_list, ('noun', 'player'))
else:
raise ParserError("Must start with subject, object or verb not: %s" % start)
|
pedrogideon7/spy_quest
|
parser.py
|
Python
|
mit
| 1,938
| 0.004128
|
from grslra import testdata
from grslra.grslra_batch import grslra_batch, slra_by_factorization
from grslra.structures import Hankel
from grslra.scaling import Scaling
import numpy as np
import time
# The goal of this experiment is to identify an LTI system from a noisy outlier-contaminated and subsampled observation of its impulse response
PROFILE = 0
if PROFILE:
import cProfile
N = 80
m = 20
k = 5
sigma=0.05
outlier_rate = 0.05
outlier_amplitude = 1
rate_Omega=0.5
N_f = 20
scaling = Scaling(centering=True)
p = 0.1
x, x_0, U, Y = testdata.testdata_lti_outliers(N + N_f, m, k, rho=outlier_rate, amplitude=outlier_amplitude, sigma=sigma)
# determine scaling factor
scaling.scale_reference(x)
mu = (1-p) * (3 * sigma / scaling.factor) ** 2
# draw sampling set
card_Omega = np.int(np.round(rate_Omega * N))
Omega = np.random.choice(N, card_Omega, replace=False)
# create binary support vectors for Omega and Omega_not
entries = np.zeros((N + N_f, ))
entries[Omega] = 1
entries_not = np.ones_like(entries) - entries
# set unobserved entries in x to zero
x *= entries
x_Omega = x[Omega]
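# A Hankel matrix with m rows needs n = N + N_f - m + 1 columns so that its antidiagonal
# structure covers all N + N_f samples of the (forecast-extended) signal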
n = N + N_f - m + 1
hankel = Hankel(m, n)
grslra_params = {"PRINT": None, "VERBOSE": 1}
if PROFILE:
profile = cProfile.Profile()
profile.enable()
t_start = time.time()
l_grslra, U, Y = grslra_batch(x_Omega, hankel, k, p, mu, params=grslra_params, Omega=Omega, x_0=x_0, scaling=scaling)
t_grslra = time.time() - t_start
if PROFILE:
profile.disable()
profile.dump_stats("grslra.bin")
print "error GRSLRA: ", np.linalg.norm(l_grslra - x_0) / np.linalg.norm(x_0)
print "time GRSLRA: ", t_grslra
if PROFILE:
profile = cProfile.Profile()
profile.enable()
t_start = time.time()
l_slrabyF = slra_by_factorization(x_Omega, m, k, PRINT=0, x_0=x_0, Omega=Omega, N=N + N_f)
t_slrabyf = time.time() - t_start
if PROFILE:
profile.disable()
profile.dump_stats("slrabyf.bin")
print "error SLRA by F: ", np.linalg.norm(l_slrabyF - x_0) / np.linalg.norm(x_0)
print "time SLRA by F: ", t_slrabyf
np.savez('result_sysid_lti.npz', x_Omega=x_Omega, Omega=Omega, x_0=x_0, t_grslra=t_grslra, l_grslra=l_grslra, t_slrabyf=t_slrabyf, l_slrabyF=l_slrabyF)
|
clemenshage/grslra
|
experiments/6_grslra/system_identification_lti/system_identification.py
|
Python
|
mit
| 2,175
| 0.004138
|
"""LaTeX Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from traitlets import Unicode, default
from traitlets.config import Config
from nbconvert.filters.highlight import Highlight2Latex
from nbconvert.filters.filter_links import resolve_references
from .templateexporter import TemplateExporter
class LatexExporter(TemplateExporter):
"""
Exports to a Latex template. Inherit from this class if your template is
    LaTeX based and you need custom transformers/filters. Inherit from it if
    you are writing your own LaTeX template and need custom transformers/filters.
    If you don't need custom transformers/filters, just change the
'template_file' config option. Place your template in the special "/latex"
subfolder of the "../templates" folder.
"""
@default('file_extension')
def _file_extension_default(self):
return '.tex'
@default('template_file')
def _template_file_default(self):
return 'article.tplx'
# Latex constants
@default('default_template_path')
def _default_template_path_default(self):
return os.path.join("..", "templates", "latex")
@default('template_skeleton_path')
def _template_skeleton_path_default(self):
return os.path.join("..", "templates", "latex", "skeleton")
#Extension that the template files use.
template_extension = Unicode(".tplx").tag(config=True)
output_mimetype = 'text/latex'
def default_filters(self):
for x in super(LatexExporter, self).default_filters():
yield x
yield ('resolve_references', resolve_references)
@property
def default_config(self):
c = Config({
'NbConvertBase': {
'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/markdown', 'text/plain']
},
'ExtractOutputPreprocessor': {
'enabled':True
},
'SVG2PDFPreprocessor': {
'enabled':True
},
'LatexPreprocessor': {
'enabled':True
},
'SphinxPreprocessor': {
'enabled':True
},
'HighlightMagicsPreprocessor': {
'enabled':True
}
})
c.merge(super(LatexExporter,self).default_config)
return c
def from_notebook_node(self, nb, resources=None, **kw):
langinfo = nb.metadata.get('language_info', {})
lexer = langinfo.get('pygments_lexer', langinfo.get('name', None))
self.register_filter('highlight_code',
Highlight2Latex(pygments_lexer=lexer, parent=self))
return super(LatexExporter, self).from_notebook_node(nb, resources, **kw)
def _create_environment(self):
environment = super(LatexExporter, self)._create_environment()
# Set special Jinja2 syntax that will not conflict with latex.
environment.block_start_string = "((*"
environment.block_end_string = "*))"
environment.variable_start_string = "((("
environment.variable_end_string = ")))"
environment.comment_start_string = "((="
environment.comment_end_string = "=))"
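        # Illustrative fragment using these delimiters (not taken from any shipped template):
        #   ((* block title *))\title{((( nb.metadata.get('title', '') )))}((* endblock title *))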
return environment
|
nitin-cherian/LifeLongLearning
|
Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/nbconvert/exporters/latex.py
|
Python
|
mit
| 3,419
| 0.005557
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from ryu.services.protocols.bgp import bgpspeaker
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE
from neutron.services.bgp.agent import config as bgp_config
from neutron.services.bgp.driver import exceptions as bgp_driver_exc
from neutron.services.bgp.driver.ryu import driver as ryu_driver
from neutron.tests import base
# Test variables for BGP Speaker
FAKE_LOCAL_AS1 = 12345
FAKE_LOCAL_AS2 = 23456
FAKE_ROUTER_ID = '1.1.1.1'
# Test variables for BGP Peer
FAKE_PEER_AS = 45678
FAKE_PEER_IP = '2.2.2.5'
FAKE_AUTH_TYPE = 'md5'
FAKE_PEER_PASSWORD = 'awesome'
# Test variables for Route
FAKE_ROUTE = '2.2.2.0/24'
FAKE_NEXTHOP = '5.5.5.5'
class TestRyuBgpDriver(base.BaseTestCase):
def setUp(self):
super(TestRyuBgpDriver, self).setUp()
cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP')
cfg.CONF.set_override('bgp_router_id', FAKE_ROUTER_ID, 'BGP')
self.ryu_bgp_driver = ryu_driver.RyuBgpDriver(cfg.CONF.BGP)
mock_ryu_speaker_p = mock.patch.object(bgpspeaker, 'BGPSpeaker')
self.mock_ryu_speaker = mock_ryu_speaker_p.start()
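        # BGPSpeaker is patched so no real Ryu speaker is ever started; the tests below only
        # assert how the driver forwards calls to it.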
def test_add_new_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.mock_ryu_speaker.assert_called_once_with(
as_number=FAKE_LOCAL_AS1, router_id=FAKE_ROUTER_ID,
bgp_server_port=0,
best_path_change_handler=ryu_driver.best_path_change_cb,
peer_down_handler=ryu_driver.bgp_peer_down_cb,
peer_up_handler=ryu_driver.bgp_peer_up_cb)
def test_remove_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(0,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.assertEqual(1, speaker.shutdown.call_count)
def test_add_bgp_peer_without_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=None,
connect_mode=CONNECT_MODE_ACTIVE)
def test_add_bgp_peer_with_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS,
FAKE_AUTH_TYPE,
FAKE_PEER_PASSWORD)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=FAKE_PEER_PASSWORD,
connect_mode=CONNECT_MODE_ACTIVE)
def test_remove_bgp_peer(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.delete_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_del.assert_called_once_with(address=FAKE_PEER_IP)
def test_advertise_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.advertise_route(FAKE_LOCAL_AS1,
FAKE_ROUTE,
FAKE_NEXTHOP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_add.assert_called_once_with(prefix=FAKE_ROUTE,
next_hop=FAKE_NEXTHOP)
def test_withdraw_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.withdraw_route(FAKE_LOCAL_AS1, FAKE_ROUTE)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_del.assert_called_once_with(prefix=FAKE_ROUTE)
def test_add_same_bgp_speakers_twice(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerAlreadyScheduled,
self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS1)
def test_add_different_bgp_speakers_when_one_already_added(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
self.ryu_bgp_driver.add_bgp_speaker,
FAKE_LOCAL_AS2)
def test_add_bgp_speaker_with_invalid_asnum_paramtype(self):
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_speaker, '12345')
def test_add_bgp_speaker_with_invalid_asnum_range(self):
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, 65536)
def test_add_bgp_peer_with_invalid_paramtype(self):
# Test with an invalid asnum data-type
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, '12345')
# Test with an invalid auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'sha-1', 1234)
# Test with an invalid auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'hmac-md5', FAKE_PEER_PASSWORD)
# Test with none auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'none', FAKE_PEER_PASSWORD)
# Test with none auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'none', 1234)
# Test with a valid auth-type and no password
self.assertRaises(bgp_driver_exc.PasswordNotSpecified,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
FAKE_AUTH_TYPE, None)
def test_add_bgp_peer_with_invalid_asnum_range(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, 65536)
def test_add_bgp_peer_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS)
def test_remove_bgp_peer_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.delete_bgp_peer,
FAKE_LOCAL_AS1, 12345)
def test_remove_bgp_peer_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.delete_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP)
def test_advertise_route_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, 12345, FAKE_NEXTHOP)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, FAKE_ROUTE, 12345)
def test_advertise_route_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, FAKE_ROUTE, FAKE_NEXTHOP)
def test_withdraw_route_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, 12345)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, 12345)
def test_withdraw_route_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, FAKE_ROUTE)
def test_add_multiple_bgp_speakers(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
self.ryu_bgp_driver.add_bgp_speaker,
FAKE_LOCAL_AS2)
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.delete_bgp_speaker,
FAKE_LOCAL_AS2)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(0,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
|
wolverineav/neutron
|
neutron/tests/unit/services/bgp/driver/ryu/test_driver.py
|
Python
|
apache-2.0
| 12,381
| 0.000888
|
"""
Asynchronous functions for bulk changes to the database.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import zip
from builtins import object
from curation.tasks import update_instance, bulk_change_tracking_state, bulk_prepend_record_history, save_creation_to_citation
from django import forms
from django.http import QueryDict
from isisdata.models import *
import isisdata.tasks as dtasks
import curation.taskslib.citation_tasks as ctasks
import curation.taskslib.authority_tasks as atasks
from isisdata.filters import CitationFilter
import json
# TODO: refactor these actions to use bulk apply methods and then explicitly
# trigger search indexing (or whatever other post-save actions are needed).
class BaseAction(object):
def __init__(self):
if hasattr(self, 'default_value_field'):
self.value_field = self.default_value_field
if hasattr(self, 'default_value_field_kwargs'):
self.value_field_kwargs = self.default_value_field_kwargs
if hasattr(self, 'extra'):
self.extra_fields = self.extra
def get_value_field(self, **kwargs):
self.value_field_kwargs.update(kwargs)
return self.value_field(**self.value_field_kwargs)
def get_extra_fields(self, **kwargs):
if hasattr(self, 'extra_fields'):
return [(name, field(**kwargs)) for name, field, kwargs in self.extra_fields]
return []
def _build_filter_label(filter_params_raw):
citation_filter = CitationFilter(QueryDict(filter_params_raw, mutable=True))
filter_form = citation_filter.form
filter_data = {}
if filter_form.is_valid():
filter_data = filter_form.cleaned_data
return ', '.join([ '%s: %s' % (key, value) for key, value in list(filter_data.items()) if value ])
class PrependToRecordHistory(BaseAction):
model = Citation
label = u'Update record history'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Prepend to record history',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = bulk_prepend_record_history.delay(user.id, filter_params_raw,
value, task.id, type)
else:
result = bulk_prepend_record_history.delay(user.id, filter_params_raw,
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_explanation', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class StoreCreationDataToModel(BaseAction):
model = Citation
label = u'Store creation data to citations'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Storing creation data to citations',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value', 'readonly': True, 'initial': 'Storing creation data'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = save_creation_to_citation.delay(user.id, filter_params_raw,
value, task.id, type)
        else:
            result = save_creation_to_citation.delay(user.id, filter_params_raw,
                                                     value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('created_native', '')
task.label = 'Storing creator in citation for set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class SetRecordStatus(BaseAction):
model = Citation
label = u'Set record status'
default_value_field = forms.ChoiceField
default_value_field_kwargs = {
'choices': CuratedMixin.STATUS_CHOICES,
'label': 'Set record status',
'widget': forms.widgets.Select(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
# We need this to exist first so that we can keep it up to date as the
# group of tasks is executed.
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_value',
value, task.id, type)
else:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_value',
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_value', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class SetRecordStatusExplanation(BaseAction):
model = Citation
label = u'Set record status explanation'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Set record status explanation',
'widget': forms.widgets.TextInput(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_explanation',
value, task.id, type)
else:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_explanation',
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_explanation', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
def get_tracking_transition_counts(qs):
states = list(zip(*qs.model.TRACKING_CHOICES))[0]
transitions = dict(list(zip(states, [qs.filter(tracking_state=state).count() for state in states])))
# bugfix for Zotero imports: tracking_state is None not "NO"
transitions[qs.model.NONE] += qs.filter(tracking_state=None).count()
return transitions
def get_allowable_transition_states():
from curation.tracking import TrackingWorkflow
return dict([(target, source) for source, target in TrackingWorkflow.transitions])
def get_transition_labels():
from curation.tracking import TrackingWorkflow
return dict(Tracking.TYPE_CHOICES)
class SetTrackingStatus(BaseAction):
model = Citation
label = u'Set record tracking status'
default_value_field = forms.ChoiceField
default_value_field_kwargs = {
'choices': Tracking.TYPE_CHOICES,
'label': 'Set record tracking status',
'widget': forms.widgets.Select(attrs={'class': 'action-value'}),
}
extra_js = 'curation/js/bulktracking.js'
extra_fields = (
('info', forms.CharField, {'label': 'Tracking Info', 'required': False, 'widget': forms.widgets.TextInput(attrs={'class': 'form-control', 'part_of': 'SetTrackingStatus', 'required': False})}),
('notes', forms.CharField, {'label': 'Tracking Notes', 'required': False, 'widget': forms.widgets.Textarea(attrs={'class': 'form-control', 'part_of': 'SetTrackingStatus', 'required': False})}),
)
@staticmethod
def get_extra_data(queryset=None, **kwargs):
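        # Builds a small JS snippet with the data the bulk-action page needs client-side
        # (presumably consumed by the extra_js file declared above, curation/js/bulktracking.js).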
transition_counts = json.dumps(get_tracking_transition_counts(queryset))
allowable_states = json.dumps(get_allowable_transition_states())
transition_labels = json.dumps(get_transition_labels())
return """
var settrackingstatus_data = {
transition_counts: %s,
allowable_states: %s,
transition_labels: %s
}""" % (transition_counts, allowable_states, transition_labels)
def apply(self, user, filter_params_raw, value, info='', notes='', **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = bulk_change_tracking_state.delay(user.id, filter_params_raw, value, info, notes, task.id, type)
else:
result = bulk_change_tracking_state.delay(user.id, filter_params_raw, value, info, notes, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_explanation', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class ReindexCitation(BaseAction):
model = Citation
label = u'Reindex citations'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Reindex citations',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value', 'readonly': True, 'initial': 'Reindex citations'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
result = ctasks.reindex_citations.delay(user.id, filter_params_raw, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('reindex_citations', value)
task.label = 'Reindexing citations: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class ReindexAuthorities(BaseAction):
model = Authority
label = u'Reindex authorities'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Reindex authorities',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value', 'readonly': True, 'initial': 'Reindex authorities'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
result = atasks.reindex_authorities.delay(user.id, filter_params_raw, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('reindex_authorities', value)
task.label = 'Reindexing authorities: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class DeleteDuplicateAttributes(BaseAction):
model = Authority
label = u'Delete Duplicate Attributes'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Delete Duplicate Attributes',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value', 'readonly': True, 'initial': 'Delete Duplicate Attributes'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
result = atasks.delete_duplicate_attributes.delay(user.id, filter_params_raw, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('delete_duplicate_attributes', value)
task.label = 'Deleting Duplicate Attributes: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
AVAILABLE_ACTIONS = [SetRecordStatus, SetRecordStatusExplanation, SetTrackingStatus, PrependToRecordHistory, StoreCreationDataToModel, ReindexCitation]
AVAILABLE_ACTIONS_AUTHORITY = [StoreCreationDataToModel, ReindexAuthorities, DeleteDuplicateAttributes]
|
upconsulting/IsisCB
|
isiscb/curation/actions.py
|
Python
|
mit
| 13,091
| 0.004278
|
"""Parallel testing, supporting arbitrary collection ordering
The Workflow
------------
- Master py.test process starts up, inspects config to decide how many slaves to start, if at all
- env['parallel_base_urls'] is inspected first
- py.test config.option.appliances and the related --appliance cmdline flag are used
if env['parallel_base_urls'] isn't set
- if neither are set, no parallelization happens
- Slaves are started
- Master runs collection, blocks until slaves report their collections
- Slaves each run collection and submit it to the master, then block inside their runtest loop,
waiting for tests to run
- Master diffs slave collections against its own; the test ids are verified to match
across all nodes
- Master enters main runtest loop, uses a generator to build lists of test groups which are then
sent to slaves, one group at a time
- For each phase of each test, the slave serializes test reports, which are then unserialized on
the master and handed to the normal pytest reporting hooks, which is able to deal with test
reports arriving out of order
- Before running the last test in a group, the slave will request more tests from the master
- If more tests are received, they are run
- If no tests are received, the slave will shut down after running its final test
- After all slaves are shut down, the master will do its end-of-session reporting as usual, and
shut down
"""
from itertools import groupby
import difflib
import json
import os
import signal
import subprocess
from collections import defaultdict, deque, namedtuple
from datetime import datetime
from itertools import count
import attr
from threading import Thread
from time import sleep, time
import pytest
import zmq
from _pytest import runner
from fixtures import terminalreporter
from fixtures.parallelizer import remote
from fixtures.pytest_store import store
from cfme.utils import at_exit, conf
from cfme.utils.log import create_sublogger
from cfme.utils.path import conf_path
# Initialize slaveid to None, indicating this as the master process
# slaves will set this to a unique string when they're initialized
conf.runtime['env']['slaveid'] = None
if not conf.runtime['env'].get('ts'):
ts = str(time())
conf.runtime['env']['ts'] = ts
def pytest_addhooks(pluginmanager):
import hooks
pluginmanager.add_hookspecs(hooks)
@pytest.mark.trylast
def pytest_configure(config):
"""Configures the parallel session, then fires pytest_parallel_configured."""
reporter = terminalreporter.reporter()
holder = config.pluginmanager.get_plugin("appliance-holder")
appliances = holder.appliances
if len(appliances) > 1:
session = ParallelSession(config, appliances)
config.pluginmanager.register(session, "parallel_session")
store.parallelizer_role = 'master'
reporter.write_line(
'As a parallelizer master kicking off parallel session for these {} appliances'.format(
len(appliances)),
green=True)
config.hook.pytest_parallel_configured(parallel_session=session)
else:
reporter.write_line('No parallelization required', green=True)
config.hook.pytest_parallel_configured(parallel_session=None)
def handle_end_session(signal, frame):
# when signaled, end the current test session immediately
if store.parallel_session:
store.parallel_session.session_finished = True
signal.signal(signal.SIGQUIT, handle_end_session)
@attr.s(hash=False)
class SlaveDetail(object):
slaveid_generator = ('slave{:02d}'.format(i) for i in count())
appliance = attr.ib()
id = attr.ib(default=attr.Factory(
lambda: next(SlaveDetail.slaveid_generator)))
forbid_restart = attr.ib(default=False, init=False)
tests = attr.ib(default=attr.Factory(set), repr=False)
process = attr.ib(default=None, repr=False)
provider_allocation = attr.ib(default=attr.Factory(list), repr=False)
def start(self):
if self.forbid_restart:
return
devnull = open(os.devnull, 'w')
# worker output redirected to null; useful info comes via messages and logs
self.process = subprocess.Popen(
['python', remote.__file__, self.id, self.appliance.as_json, conf.runtime['env']['ts']],
stdout=devnull,
)
at_exit(self.process.kill)
def poll(self):
if self.process is not None:
return self.process.poll()
class ParallelSession(object):
def __init__(self, config, appliances):
self.config = config
self.session = None
self.session_finished = False
self.countfailures = 0
self.collection = []
self.sent_tests = 0
self.log = create_sublogger('master')
self.maxfail = config.getvalue("maxfail")
self._failed_collection_errors = {}
self.terminal = store.terminalreporter
self.trdist = None
self.slaves = {}
self.test_groups = self._test_item_generator()
self._pool = []
from cfme.utils.conf import cfme_data
self.provs = sorted(set(cfme_data['management_systems'].keys()),
key=len, reverse=True)
self.used_prov = set()
self.failed_slave_test_groups = deque()
self.slave_spawn_count = 0
self.appliances = appliances
# set up the ipc socket
zmq_endpoint = 'ipc://{}'.format(
config.cache.makedir('parallelize').join(str(os.getpid())))
ctx = zmq.Context.instance()
self.sock = ctx.socket(zmq.ROUTER)
self.sock.bind(zmq_endpoint)
# clean out old slave config if it exists
slave_config = conf_path.join('slave_config.yaml')
slave_config.check() and slave_config.remove()
# write out the slave config
conf.runtime['slave_config'] = {
'args': self.config.args,
'options': dict( # copy to avoid aliasing
self.config.option.__dict__,
use_sprout=False, # Slaves don't use sprout
),
'zmq_endpoint': zmq_endpoint,
}
if hasattr(self, "slave_appliances_data"):
conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
conf.save('slave_config')
for appliance in self.appliances:
slave_data = SlaveDetail(appliance=appliance)
self.slaves[slave_data.id] = slave_data
for slave in sorted(self.slaves):
self.print_message("using appliance {}".format(self.slaves[slave].appliance.url),
slave, green=True)
def _slave_audit(self):
# XXX: There is currently no mechanism to add or remove slave_urls, short of
# firing up the debugger and doing it manually. This is making room for
# planned future abilities to dynamically add and remove slaves via automation
# check for unexpected slave shutdowns and redistribute tests
for slave in self.slaves.values():
returncode = slave.poll()
if returncode:
slave.process = None
if returncode == -9:
msg = '{} killed due to error, respawning'.format(slave.id)
else:
msg = '{} terminated unexpectedly with status {}, respawning'.format(
slave.id, returncode)
if slave.tests:
failed_tests, slave.tests = slave.tests, set()
num_failed_tests = len(failed_tests)
self.sent_tests -= num_failed_tests
msg += ' and redistributing {} tests'.format(num_failed_tests)
self.failed_slave_test_groups.append(failed_tests)
self.print_message(msg, purple=True)
# If a slave was terminated for any reason, kill that slave
# the terminated flag implies the appliance has died :(
for slave in list(self.slaves.values()):
if slave.forbid_restart:
if slave.process is None:
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.appliance.url)
del self.slaves[slave.id]
else:
# no hook call here, a future audit will handle the fallout
self.print_message(
"{}'s appliance has died, deactivating slave".format(slave.id))
self.interrupt(slave)
else:
if slave.process is None:
slave.start()
self.slave_spawn_count += 1
def send(self, slave, event_data):
"""Send data to slave.
``event_data`` will be serialized as JSON, and so must be JSON serializable
"""
event_json = json.dumps(event_data)
self.sock.send_multipart([slave.id, '', event_json])
def recv(self):
# poll the zmq socket, populate the recv queue deque with responses
events = zmq.zmq_poll([(self.sock, zmq.POLLIN)], 50)
if not events:
return None, None, None
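        # ROUTER sockets deliver multipart frames: [slave identity, empty delimiter, payload];
        # send() above uses the same framing in the other direction.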
slaveid, _, event_json = self.sock.recv_multipart(flags=zmq.NOBLOCK)
event_data = json.loads(event_json)
event_name = event_data.pop('_event_name')
if slaveid not in self.slaves:
self.log.error("message from terminated worker %s %s %s",
slaveid, event_name, event_data)
return None, None, None
return self.slaves[slaveid], event_data, event_name
def print_message(self, message, prefix='master', **markup):
"""Print a message from a node to the py.test console
Args:
message: The message to print
**markup: If set, overrides the default markup when printing the message
"""
# differentiate master and slave messages by default
prefix = getattr(prefix, 'id', prefix)
if not markup:
if prefix == 'master':
markup = {'blue': True}
else:
markup = {'cyan': True}
stamp = datetime.now().strftime("%Y%m%d %H:%M:%S")
self.terminal.write_ensure_prefix(
'({})[{}] '.format(prefix, stamp), message, **markup)
def ack(self, slave, event_name):
"""Acknowledge a slave's message"""
self.send(slave, 'ack {}'.format(event_name))
def monitor_shutdown(self, slave):
# non-daemon so slaves get every opportunity to shut down cleanly
shutdown_thread = Thread(target=self._monitor_shutdown_t,
args=(slave.id, slave.process))
shutdown_thread.start()
def _monitor_shutdown_t(self, slaveid, process):
# a KeyError here means self.slaves got mangled, indicating a problem elsewhere
if process is None:
self.log.warning('Slave was missing when trying to monitor shutdown')
def sleep_and_poll():
start_time = time()
# configure the polling logic
polls = 0
# how often to poll
poll_sleep_time = .5
# how often to report (calculated to be around once a minute based on poll_sleep_time)
poll_report_modulo = 60 / poll_sleep_time
# maximum time to wait
poll_num_sec = 300
while (time() - start_time) < poll_num_sec:
polls += 1
yield
if polls % poll_report_modulo == 0:
remaining_time = int(poll_num_sec - (time() - start_time))
self.print_message(
'{} still shutting down, '
'will continue polling for {} seconds '
.format(slaveid, remaining_time), blue=True)
sleep(poll_sleep_time)
# start the poll
for poll in sleep_and_poll():
ec = process.poll()
if ec is None:
continue
else:
if ec == 0:
self.print_message('{} exited'.format(slaveid), green=True)
else:
self.print_message('{} died'.format(slaveid), red=True)
break
else:
self.print_message('{} failed to shut down gracefully; killed'.format(slaveid),
red=True)
process.kill()
def interrupt(self, slave, **kwargs):
"""Nicely ask a slave to terminate"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.send_signal(subprocess.signal.SIGINT)
self.monitor_shutdown(slave, **kwargs)
def kill(self, slave, **kwargs):
"""Rudely kill a slave"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.kill()
self.monitor_shutdown(slave, **kwargs)
def send_tests(self, slave):
"""Send a slave a group of tests"""
try:
tests = list(self.failed_slave_test_groups.popleft())
except IndexError:
tests = self.get(slave)
self.send(slave, tests)
slave.tests.update(tests)
collect_len = len(self.collection)
tests_len = len(tests)
self.sent_tests += tests_len
if tests:
self.print_message('sent {} tests to {} ({}/{}, {:.1f}%)'.format(
tests_len, slave.id, self.sent_tests, collect_len,
self.sent_tests * 100. / collect_len
))
return tests
def pytest_sessionstart(self, session):
"""pytest sessionstart hook
- sets up distributed terminal reporter
        - sets up zmq ipc socket for the slaves to use
- writes pytest options and args to slave_config.yaml
- starts the slaves
- register atexit kill hooks to destroy slaves at the end if things go terribly wrong
"""
# If reporter() gave us a fake terminal reporter in __init__, the real
# terminal reporter is registered by now
self.terminal = store.terminalreporter
self.trdist = TerminalDistReporter(self.config, self.terminal)
self.config.pluginmanager.register(self.trdist, "terminaldistreporter")
self.session = session
def pytest_runtestloop(self):
"""pytest runtest loop
- Disable the master terminal reporter hooks, so we can add our own handlers
that include the slaveid in the output
- Send tests to slaves when they ask
- Log the starting of tests and test results, including slave id
- Handle clean slave shutdown when they finish their runtest loops
- Restore the master terminal reporter after testing so we get the final report
"""
# Build master collection for slave diffing and distribution
self.collection = [item.nodeid for item in self.session.items]
# Fire up the workers after master collection is complete
# master and the first slave share an appliance, this is a workaround to prevent a slave
# from altering an appliance while master collection is still taking place
for slave in self.slaves.values():
slave.start()
try:
self.print_message("Waiting for {} slave collections".format(len(self.slaves)),
red=True)
# Turn off the terminal reporter to suppress the builtin logstart printing
terminalreporter.disable()
while True:
# spawn/kill/replace slaves if needed
self._slave_audit()
if not self.slaves:
# All slaves are killed or errored, we're done with tests
self.print_message('all slaves have exited', yellow=True)
self.session_finished = True
if self.session_finished:
break
slave, event_data, event_name = self.recv()
if event_name == 'message':
message = event_data.pop('message')
markup = event_data.pop('markup')
# messages are special, handle them immediately
self.print_message(message, slave, **markup)
self.ack(slave, event_name)
elif event_name == 'collectionfinish':
slave_collection = event_data['node_ids']
# compare slave collection to the master, all test ids must be the same
self.log.debug('diffing {} collection'.format(slave.id))
diff_err = report_collection_diff(
slave.id, self.collection, slave_collection)
if diff_err:
self.print_message(
'collection differs, respawning', slave.id,
purple=True)
self.print_message(diff_err, purple=True)
self.log.error('{}'.format(diff_err))
self.kill(slave)
slave.start()
else:
self.ack(slave, event_name)
elif event_name == 'need_tests':
self.send_tests(slave)
self.log.info('starting master test distribution')
elif event_name == 'runtest_logstart':
self.ack(slave, event_name)
self.trdist.runtest_logstart(
slave.id,
event_data['nodeid'],
event_data['location'])
elif event_name == 'runtest_logreport':
self.ack(slave, event_name)
report = unserialize_report(event_data['report'])
if report.when in ('call', 'teardown'):
slave.tests.discard(report.nodeid)
self.trdist.runtest_logreport(slave.id, report)
elif event_name == 'internalerror':
self.ack(slave, event_name)
self.print_message(event_data['message'], slave, purple=True)
self.kill(slave)
elif event_name == 'shutdown':
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.appliance.url)
self.ack(slave, event_name)
del self.slaves[slave.id]
self.monitor_shutdown(slave)
# total slave spawn count * 3, to allow for each slave's initial spawn
# and then each slave (on average) can fail two times
if self.slave_spawn_count >= len(self.appliances) * 3:
self.print_message(
'too many slave respawns, exiting',
red=True, bold=True)
raise KeyboardInterrupt('Interrupted due to slave failures')
except Exception as ex:
self.log.error('Exception in runtest loop:')
self.log.exception(ex)
self.print_message(str(ex))
raise
finally:
terminalreporter.enable()
# Suppress other runtestloop calls
return True
def _test_item_generator(self):
for tests in self._modscope_item_generator():
yield tests
def _modscope_item_generator(self):
# breaks out tests by module, can work just about any way we want
# as long as it yields lists of tests id from the master collection
sent_tests = 0
collection_len = len(self.collection)
def get_fspart(nodeid):
return nodeid.split('::')[0]
for fspath, gen_moditems in groupby(self.collection, key=get_fspart):
for tests in self._modscope_id_splitter(gen_moditems):
sent_tests += len(tests)
self.log.info('{} tests remaining to send'.format(
collection_len - sent_tests))
yield list(tests)
def _modscope_id_splitter(self, module_items):
# given a list of item ids from one test module, break up tests into groups with the same id
parametrized_ids = defaultdict(list)
for item in module_items:
if '[' in item:
                # split on the leftmost bracket, then strip everything after the rightmost bracket
# so 'test_module.py::test_name[parametrized_id]' becomes 'parametrized_id'
parametrized_id = item.split('[')[1].rstrip(']')
else:
# splits failed, item has no parametrized id
parametrized_id = 'no params'
parametrized_ids[parametrized_id].append(item)
for id, tests in parametrized_ids.items():
if tests:
self.log.info('sent tests with param {} {!r}'.format(id, tests))
yield tests
def get(self, slave):
def provs_of_tests(test_group):
found = set()
for test in test_group:
found.update(pv for pv in self.provs
if '[' in test and pv in test)
return sorted(found)
if not self._pool:
for test_group in self.test_groups:
self._pool.append(test_group)
self.used_prov.update(provs_of_tests(test_group))
if self.used_prov:
self.ratio = float(len(self.slaves)) / len(self.used_prov)
else:
self.ratio = 0.0
if not self._pool:
return []
appliance_num_limit = 1
for idx, test_group in enumerate(self._pool):
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
if prov in slave.provider_allocation:
# provider is already with the slave, so just return the tests
self._pool.remove(test_group)
return test_group
else:
if len(slave.provider_allocation) >= appliance_num_limit:
continue
else:
# Adding provider to slave since there are not too many
slave.provider_allocation.append(prov)
self._pool.remove(test_group)
return test_group
else:
# No providers - ie, not a provider parametrized test
# or no params, so not parametrized at all
self._pool.remove(test_group)
return test_group
# Here means no tests were able to be sent
for test_group in self._pool:
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
# Already too many slaves with provider
app = slave.appliance
self.print_message(
'cleansing appliance', slave, purple=True)
try:
app.delete_all_providers()
except Exception as e:
self.print_message(
                        'could not cleanse', slave, red=True)
                    self.print_message('error: {}'.format(e), slave, red=True)
slave.provider_allocation = [prov]
self._pool.remove(test_group)
return test_group
assert not self._pool, self._pool
return []
def report_collection_diff(slaveid, from_collection, to_collection):
"""Report differences, if any exist, between master and a slave collection
    Returns a unified diff string if the collections differ, otherwise None
    Note:
        This function will sort the collections before comparing them.
"""
from_collection, to_collection = sorted(from_collection), sorted(to_collection)
if from_collection == to_collection:
# Well, that was easy.
return
# diff the two, so we get some idea of what's wrong
diff = difflib.unified_diff(
from_collection,
to_collection,
fromfile='master',
tofile=slaveid,
)
# diff is a line generator, stringify it
diff = '\n'.join([line.rstrip() for line in diff])
return '{slaveid} diff:\n{diff}\n'.format(slaveid=slaveid, diff=diff)
class TerminalDistReporter(object):
"""Terminal Reporter for Distributed Testing
trdist reporter exists to make sure we get good distributed logging during the runtest loop,
which means the normal terminal reporter should be disabled during the loop
This class is where we make sure the terminal reporter is made aware of whatever state it
needs to report properly once we turn it back on after the runtest loop
It has special versions of pytest reporting hooks that, where possible, try to include a
slave ID. These hooks are called in :py:class:`ParallelSession`'s runtestloop hook.
"""
def __init__(self, config, terminal):
self.config = config
self.tr = terminal
self.outcomes = {}
def runtest_logstart(self, slaveid, nodeid, location):
test = self.tr._locationline(nodeid, *location)
prefix = '({}) {}'.format(slaveid, test)
self.tr.write_ensure_prefix(prefix, 'running', blue=True)
self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)
def runtest_logreport(self, slaveid, report):
# Run all the normal logreport hooks
self.config.hook.pytest_runtest_logreport(report=report)
# Now do what the terminal reporter would normally do, but include parallelizer info
outcome, letter, word = self.config.hook.pytest_report_teststatus(report=report)
# Stash stats on the terminal reporter so it reports properly
# after it's reenabled at the end of runtestloop
self.tr.stats.setdefault(outcome, []).append(report)
test = self.tr._locationline(report.nodeid, *report.location)
prefix = '({}) {}'.format(slaveid, test)
try:
# for some reason, pytest_report_teststatus returns a word, markup tuple
# when the word would be 'XPASS', so unpack it here if that's the case
word, markup = word
except (TypeError, ValueError):
# word wasn't iterable or didn't have enough values, use it as-is
pass
if word in ('PASSED', 'xfail'):
markup = {'green': True}
elif word in ('ERROR', 'FAILED', 'XPASS'):
markup = {'red': True}
elif word:
markup = {'yellow': True}
# For every stage where we can report the outcome, stash it in the outcomes dict
if word:
self.outcomes[test] = Outcome(word, markup)
# Then, when we get to the teardown report, print the last outcome
# This prevents reportings a test as 'PASSED' if its teardown phase fails, for example
if report.when == 'teardown':
word, markup = self.outcomes.pop(test)
self.tr.write_ensure_prefix(prefix, word, **markup)
Outcome = namedtuple('Outcome', ['word', 'markup'])
def unserialize_report(reportdict):
"""
Generate a :py:class:`TestReport <pytest:_pytest.runner.TestReport>` from a serialized report
"""
return runner.TestReport(**reportdict)
|
jkandasa/integration_tests
|
fixtures/parallelizer/__init__.py
|
Python
|
gpl-2.0
| 27,463
| 0.002185
|
import numpy, sys, os, pylab, astropy, astropy.io.fits as pyfits, ldac, math
def open_and_get_shearcat(filename, tablename):
#
# for opening and retrieving shear cat.
#
return ldac.openObjectFile(filename, tablename)
#class ello
def avg_shear(g1array, g2array):
avg1 = numpy.mean(g1array)
avg2 = numpy.mean(g2array)
# leave open possibility of weighted average
return [avg1,avg2]
def avg_shear_aniscorr(g1array, g2array, epol1, epol2):
# g1 array : numpy array of g1
# g2 array : numpy array of g2
# e1po1 array: array of e1 correction at gal position
# e2pol array: array of e2 correction at gal position
# Average shear in bins of the ell correction
# this may be defunct.
# get indices of sorted (by epol) arrays
indx = numpy.lexsort((g1array,epol1))
indy = numpy.lexsort((g2array,epol2))
sortedx = []
sortedy = []
binsy =[0] # first bin 0
binsx =[0] #
binwidth = len(g1array) / 10 # 10 bins
for j in range(1,10):
        binsx.append(epol1[indx[j*binwidth]]+0.00001)
        binsy.append(epol2[indy[j*binwidth]]+0.00001)
    binsx.append(epol1[indx[-1]]+0.00001)
    binsy.append(epol2[indy[-1]]+0.00001)
for i in range(len(g1array)):
        sortedx.append([g1array[indx[i]],epol1[indx[i]]])
        sortedy.append([g2array[indy[i]],epol2[indy[i]]])
xarr = numpy.array(sortedx)
yarr = numpy.array(sortedy)
xavgs = []
yavgs = []
for j in range(10):
xavgs.append(numpy.average(xarr[binsx[j]:binsx[j+1],0]))
yavgs.append(numpy.average(yarr[binsy[j]:binsy[j+1],0]))
return xavgs, binsx, yavgs, binsy
# lets make 10 bins
def avg_epol_gamma(g1array, g2array, epol1, epol2):
# g1 array : numpy array of g1
# g2 array : numpy array of g2
# e1po1 array: array of e1 correction at gal position
# e2pol array: array of e2 correction at gal position
avg1 = numpy.mean(g1array*epol1)
err1 = numpy.std(g1array*epol1)/math.sqrt(len(epol1)*1.0)
err1bs = do_bootstrap_error(g1array*epol1)
avg2 = numpy.mean(g2array*epol2)
err2 = numpy.std(g2array*epol2)/math.sqrt(len(epol2)*1.0)
err2bs = do_bootstrap_error(g2array*epol2)
# print avg1,avg2,err1,err2
return avg1,avg2,err1,err2,err1bs, err2bs
def star_gal_correlation(galarray, stararray):
# galarray: array with galaxy positions and
# shears values.
# stararray: array with star positions and
# ell values values.
gal_g1arr = galarray['g1']
gal_g2arr = galarray['g2']
gal_xarr = galarray['x']
gal_yarr = galarray['y']
star_xarr = stararray['x']
star_yarr = stararray['y']
star_e1pol = stararray['e1pol']
star_e2pol = stararray['e2pol']
star_e1 = stararray['e1']
star_e2 = stararray['e2']
# create full arrays for correlations
# star arrays : 1111... 22222... 3333...
# gal arrays : 1234... 12345... 1234
#
starlen=len(stararray['e1'])
gallen=len(galarray['g1'])
gal_g1corr = make_gal_corrarray(galarray['g1'],starlen)
gal_g2corr = make_gal_corrarray(galarray['g2'],starlen)
gal_xcorr = make_gal_corrarray(galarray['x'],starlen)
gal_ycorr = make_gal_corrarray(galarray['y'],starlen)
star_e1corr = make_star_corrarray(stararray['e1'],gallen)
star_e2corr = make_star_corrarray(stararray['e2'],gallen)
star_xcorr = make_star_corrarray(stararray['x'],gallen)
star_ycorr = make_star_corrarray(stararray['y'],gallen)
distcorr = numpy.sqrt((star_xcorr-gal_xcorr)*(star_xcorr-gal_xcorr)+ \
(star_ycorr-gal_ycorr)*(star_ycorr-gal_ycorr))
xi_pp = gal_g1corr*star_e1corr + gal_g2corr*star_e2corr
#star autocorrelation
emagarray=numpy.sqrt(stararray['e1']*stararray['e1']+stararray['e2']*stararray['e2'])
emagautocorr=numpy.zeros((starlen*(starlen-1))/2)
edistautocorr=numpy.zeros((starlen*(starlen-1))/2)
iterator=0
# I'm sure there's a better way to do this.
for i in range(len(emagarray)):
for j in range(i+1,len(emagarray)):
emagautocorr[iterator]=emagarray[i]*emagarray[j]
edistautocorr[iterator]=math.sqrt(((stararray['x'][i]-stararray['x'][j])*\
(stararray['x'][i]-stararray['x'][j]))+\
((stararray['y'][i]-stararray['y'][j])*\
(stararray['y'][i]-stararray['y'][j])))
iterator=iterator + 1
return xi_pp, distcorr, emagautocorr, edistautocorr
def make_gal_corrarray( objarray, n ):
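    # Repeats the whole galaxy array n times (one copy per star); equivalent to numpy.tile(objarray, n).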
m = len(objarray)
a = numpy.array(numpy.zeros((n*m)))
for k in range(n):
a[k*m:(k+1)*m]=objarray
return a
def make_star_corrarray( objarray, n ):
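    # Repeats each star value n times (once per galaxy); equivalent to numpy.repeat(objarray, n).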
m = len(objarray)
a = numpy.array(numpy.zeros((n*m)))
for k in range(m):
a[k*n:(k+1)*n]=objarray[k]
return a
def do_bootstrap_error(inputarray, nbootstraps=100):
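    # Bootstrap error on the mean: resample with replacement nbootstraps times, take the mean
    # of each resample, and return the standard deviation of those means.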
n = len(inputarray)
npars=inputarray[numpy.random.random_integers(0,n-1,(n,nbootstraps))]
meanlist = numpy.mean(npars,0)
if len(meanlist) != nbootstraps:
print 'averaging across wrong axis'
return numpy.std(meanlist)
#
# switch to polar coordinates
#
def cartesianToPolar(x, y):
r = numpy.sqrt(x**2+y**2)
phi = numpy.arccos(x/r)
    phi2 = 2. * numpy.pi - phi
phi_yp = y>=0.
phi2_yp = y<0.
phi = phi* phi_yp +phi2* phi2_yp
return r, phi
#
# make the plots
#
def make_scatter_inputs(yvals, xvals,therange, nbins=10):
if len(yvals) != len(xvals):
        print len(yvals), ' does not equal ',len(xvals)
vals, thebins = pylab.histogram(xvals, weights=yvals, bins=nbins,range=therange)
vals_sq, thebins = pylab.histogram(xvals, weights=yvals*yvals, bins=nbins ,range=therange)
vals_n, thebins = pylab.histogram(xvals, bins=nbins,range=therange)
val_errs = numpy.sqrt((vals_sq/vals_n) - (vals/vals_n)*(vals/vals_n))/numpy.sqrt(vals_n)
bincenters=[]
binerrs=[]
# print 'The Bins = ', thebins
for k in range(len(thebins)-1):
bincenters.append((thebins[k]+thebins[k+1])/2.)
binerrs.append((thebins[k+1]-thebins[k])/2.)
# print 'bincenters = ',bincenters
return bincenters, vals/vals_n, binerrs, val_errs
def get_percentiles(arr):
# return 10 and 90 %iles
sorted = numpy.sort(arr)
n = len(sorted)
val = n/10
return sorted[val],sorted[n-val]
if __name__ == "__main__":
filename_gal = sys.argv[1]
filename_star = sys.argv[2]
if len(sys.argv)==3:
outfilename = 'psfplots.png'
elif len(sys.argv)==4:
outfilename = sys.argv[3]
else:
print 'usage: ./quality_studies_psf.py [galaxy_shear.cat] [star.cat] [output=psfplots.png]'
sys.exit(1)
galcat = open_and_get_shearcat(filename_gal,'OBJECTS')
starcat = open_and_get_shearcat(filename_star,'OBJECTS')
if galcat:
print ' got Galaxy cat'
if starcat:
print ' got Star cat'
maxrg=numpy.max(starcat['rg'])
galcat = galcat.filter(galcat['rg']>maxrg)
galcat = galcat.filter(galcat['Flag']==0)
gal_g1arr = numpy.array(galcat['gs1'])
gal_g2arr = numpy.array(galcat['gs2'])
gal_xarr = numpy.array(galcat['x'])
gal_yarr = numpy.array(galcat['y'])
gal_e1corr = numpy.array(galcat['e1corrpol'])
gal_e2corr = numpy.array(galcat['e2corrpol'])
star_xarr = numpy.array(starcat['x'])
star_yarr = numpy.array(starcat['y'])
star_e1corr = numpy.array(starcat['e1corrpol'])
star_e2corr = numpy.array(starcat['e2corrpol'])
star_e1 = numpy.array(starcat['e1'])
star_e2 = numpy.array(starcat['e2'])
pylab.rc('text', usetex=True)
pylab.figure(figsize=(15,10) ,facecolor='w')
pylab.subplots_adjust(wspace=0.3,hspace=0.3)
pylab.subplot(231,axisbg='w')
pylab.cool()
# Qualtest 1 : Average shear:
avg_gs1 = numpy.mean(gal_g1arr)
err_gs1 = numpy.std(gal_g1arr)/math.sqrt(len(gal_g1arr*1.0))
err_gs1bs = do_bootstrap_error(gal_g1arr)
avg_gs2 = numpy.mean(gal_g2arr)
err_gs2 = numpy.std(gal_g2arr)/math.sqrt(len(gal_g2arr*1.0))
err_gs2bs = do_bootstrap_error(gal_g2arr)
pylab.errorbar(y=[avg_gs2,avg_gs2],x=[avg_gs1,avg_gs1],
xerr=[err_gs1,err_gs1bs], yerr=[err_gs2,err_gs2bs], fmt='r.',
label='''$<\gamma_{1,2}> $''')
pylab.axis([-0.04,0.04,-0.04,0.04])
pylab.xlabel('$<\gamma_{1}>$', horizontalalignment='right')
pylab.ylabel('$<\gamma_{2}>$')
pylab.legend(loc=0)
pylab.grid()
# Qualtest 2 : Average shear in aniso corr bins.
# e1anisocorr : left over from correction
# e1corrpol : the correction.
# the anisotropy polynomial values for all the objects.
bincenters, gamma1vals, binerrs, gamma1errs = \
make_scatter_inputs(gal_g1arr, gal_e1corr, (-0.02,0.03), 10)
bincenters2, gamma2vals, binerrs2, gamma2errs = \
make_scatter_inputs(gal_g2arr, gal_e2corr, (-0.02,0.03), 10)
pylab.subplot(232,axisbg='w')
pylab.errorbar(x=bincenters,y=gamma1vals,yerr=gamma1errs,xerr=binerrs,
fmt='b.',label='''$<\gamma_{1}>$''')
pylab.errorbar(x=bincenters2,y=gamma2vals,yerr=gamma2errs,xerr=binerrs2,
fmt='r.',label='$<\gamma_{2}>$')
pylab.axis([-0.05,0.05,-0.2,0.2])
pylab.xlabel('$e^{*pol}_{1,2}$', horizontalalignment='right')
pylab.ylabel('''$<\gamma_{1,2}>$''')
pylab.legend(loc=0)
pylab.grid()
# Qualtest 3 : <epol gamma>
    eg1, eg2, eg1err, eg2err, eg1errbs, eg2errbs = \
        avg_epol_gamma(gal_g1arr, gal_g2arr, gal_e1corr, gal_e2corr)
pylab.subplot(233)
pylab.errorbar(x=[eg1, eg1],y=[eg2,eg2],
xerr=[eg1err,eg1errbs], yerr=[eg2err, eg2errbs], fmt='b.',
label='''$<e^{pol}_{1,2}\gamma_{1,2}>$ ''')
pylab.cool()
pylab.legend(loc=0)
pylab.axis([-0.0004,0.0004,-0.0004,0.0004])
pylab.xlabel('''$<e^{pol}_{1}\gamma_{1}>$ ''', horizontalalignment='right')
pylab.ylabel('''$<e^{pol}_{2}\gamma_{2}>$ ''')
pylab.grid()
# Qualtest 5 : epol * g
pylab.subplot(234)
galarray={'x':gal_xarr, 'y':gal_yarr, 'g1':gal_g1arr, 'g2':gal_g2arr}
stararray={'x':star_xarr,
'y':star_yarr,
'e1':star_e1,
'e2':star_e2,
'e1pol':star_e1corr,
'e2pol':star_e2corr}
xi_pp, distcorr,emagautocorr, edistautocorr = \
star_gal_correlation(galarray, stararray)
xv, yv, xe, ye = make_scatter_inputs(xi_pp, distcorr,(0,10000), nbins=10 )
pylab.errorbar(x=xv,y=yv,yerr=ye,xerr=xe,fmt='b.',label='data')
#######################
# Here we create the random star
# need e1 and e2 arrays
# First Generate same ellipticity distribution
#######################
# |ellipticity| distribution
elldist = numpy.sqrt(star_e1*star_e1+star_e2*star_e2)
rxi_ppt=[]
distcorrt=[]
rxivals1 = [0,0,0,0,0,0,0,0,0,0]
rxierrs1 = [0,0,0,0,0,0,0,0,0,0]
rxivalssq1 = [0,0,0,0,0,0,0,0,0,0]
rxivals1n = [0,0,0,0,0,0,0,0,0,0]
ntrials=10
rxivals1_sum = numpy.zeros(ntrials)
rxierrs1_sum = numpy.zeros(ntrials)
for isim in range(10):
# for each trial, generate random numbers to sample from
# the ellipticity distribution
        ellindex = numpy.random.random_integers(0, len(elldist)-1, len(elldist))
# Set up the array
rand_ell_arr = numpy.zeros(len(elldist))
# fill the array, I think there's a fast way to do this...
for i in range(len(elldist)):
            rand_ell_arr[i] = elldist[ellindex[i]]
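        # (Note, not in the original: the per-element copy above could likely be
        #  replaced by a single vectorised draw, e.g.
        #      rand_ell_arr = elldist[ellindex]
        #  which resamples |e| with replacement in one call.)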
# now the random angle 0-pi
rand_phi_arr = numpy.random.uniform(0,math.pi,len(elldist))
# the e1 & e2 projections
this_e1_arr = rand_ell_arr*numpy.cos(2.*rand_phi_arr)
this_e2_arr = rand_ell_arr*numpy.sin(2.*rand_phi_arr)
# set the random star array
stararray={'x':star_xarr,
'y':star_yarr,
'e1':this_e1_arr,
'e2':this_e2_arr,
'e1pol':star_e1corr,
'e2pol':star_e2corr}
# and correlate;
rxi_pp, distcorr, dum1, dum2 = star_gal_correlation(galarray, stararray)
rxibins1, rxivals1[isim],rxibinserr1, rxierrs1[isim]=\
make_scatter_inputs(rxi_pp, distcorr,therange=(0,10000), nbins=10)
for j in range(len(rxivals1[isim])):
rxivals1_sum[j] = rxivals1_sum[j]+rxivals1[isim][j]
rxierrs1_sum[j] = rxierrs1_sum[j]+rxierrs1[isim][j]
# end loop
#
rxivals1_sum = rxivals1_sum/10.
rxierrs1_sum = rxierrs1_sum/(10.*math.sqrt(10))
pylab.errorbar(x=rxibins1,y=rxivals1_sum,yerr=rxierrs1_sum,xerr=rxibinserr1,\
fmt='g.',label='Random')
ee_bincent,ee_vals,ee_binerrs, ee_errs =\
make_scatter_inputs(emagautocorr, edistautocorr,\
(0,10000), nbins=10)
pylab.errorbar(x=ee_bincent, y=ee_vals, yerr=ee_errs, xerr=ee_binerrs, \
fmt='r.', label='stars')
pylab.xlabel('''$\Delta x$ (pixels) ''', horizontalalignment='right')
pylab.ylabel('''$<e^{*}_{+} \gamma_{+} +e^{*}_{x} \gamma_{x} >$''')
pylab.cool()
pylab.grid()
pylab.legend(loc=0)
xisys = (numpy.abs(yv)*(yv)/ee_vals)
xisys_err = xisys * numpy.sqrt(4.*(ye/yv)*(ye/yv) + (ee_errs/ee_vals)*(ee_errs/ee_vals))
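    # (Descriptive note added for clarity: xisys is the star-galaxy cross-correlation
    #  squared, normalised by the star-star autocorrelation, i.e. |<e* g>| <e* g> / <e* e*>;
    #  its error combines the relative errors in quadrature, with a factor of 4 because
    #  the cross-correlation term enters squared.)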
pylab.subplot(235)
pylab.errorbar(x= xv ,y=xisys,\
yerr=xisys_err ,xerr=ee_binerrs,fmt='b.',label='xi_p-data')
pylab.xlabel('''$\Delta x$ (pixels) ''', horizontalalignment='right')
pylab.ylabel('''$ \zeta^{PSF}_{+} $''')
pylab.cool()
pylab.grid()
# Quality Check 6 Radial dependence
rset, phiset = cartesianToPolar(gal_xarr-5000,gal_yarr-5000)
g1_rotated = gal_g1arr*numpy.cos(-2.*phiset) + gal_g2arr*numpy.sin(-2.*phiset)
g2_rotated = gal_g1arr*numpy.sin( 2.*phiset) + gal_g2arr*numpy.cos(-2.*phiset)
e1pol_rotated = gal_e1corr*numpy.cos(-2.*phiset) + gal_e2corr*numpy.sin(-2.*phiset)
e2pol_rotated = gal_e1corr*numpy.sin( 2.*phiset) + gal_e2corr*numpy.cos(-2.*phiset)
rot_bins,rot_vals, rot_binerrs, rot_valerrs= \
make_scatter_inputs(g1_rotated*e1pol_rotated,rset ,(0,5000), 10)
rot_bins2,rot_vals2, rot_binerrs2, rot_valerrs2= \
make_scatter_inputs(g2_rotated*e2pol_rotated,rset ,(0,5000), 10)
pylab.subplot(236)
pylab.errorbar(x=rot_bins, y=rot_vals, xerr=rot_binerrs, yerr=rot_valerrs, \
fmt='b.',label='$<e_{r}\gamma_{r}> (r)$' )
pylab.errorbar(x=rot_bins2, y=rot_vals2, xerr=rot_binerrs2, yerr=rot_valerrs2, \
fmt='r.',label='$<e_{x}\gamma_{x}> (r)$' )
pylab.xlabel('r (pixels)')
pylab.ylabel('$<e^{pol} \gamma>$')
pylab.grid()
pylab.legend(loc=0)
pylab.savefig(outfilename,format='png')
pylab.show()
|
deapplegate/wtgpipeline
|
quality_studies_psf.py
|
Python
|
mit
| 15,321
| 0.024672
|
#!/usr/bin/python
################################################################
#
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
#
################################################################
_help="""
-------------------------------------------------------------------------------
NAME
oftest.py - Run OFTest against IVS
SYNOPSIS
    oftest.py [--ivs-args=...] [--oft-args=...] [--test-file|-f=...] [--test-spec|-T=...]
DESCRIPTION
This script automates the execution of OFTest suites against
the IVS binary. You can use it to execute any subset of
tests against your locally built IVS binary.
    It is used by the automated testing tasks and can also be run manually by developers.
OPTIONS
    --test-spec, -T The oftest test-spec you want to execute.
                    Either this or --test-file is required. If you
                    want to run all tests, specify "all".
--test-file, -f Path to an OFTest test-file.
    --log-base-dir  Set the log base directory.
NOTES
You must set the following environment variables before
using this script:
$OFTEST Set to the top of the OFTest repository.
LOGFILES
    The output from IVS is stored in 'testlogs/OFTest/{testname}/ivs.log'.
    The output from oftest is stored in 'testlogs/OFTest/{testname}/output.log'.
    The oft.log file is stored in 'testlogs/OFTest/{testname}/oft.log'.
EXAMPLES
# Run all oftests against IVS:
> build/oftest.py -T all
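    # Run the tests listed in a file and write logs to a custom directory
    # (illustrative, using only options defined by this script):
    > build/oftest.py -f mytests.txt --log-base-dir=/tmp/oftest-logs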
"""
import os
import sys
import time
import argparse
import random
import subprocess
import pprint
import platform
import datetime
import StringIO
import signal
import select
import platform
import logging
import re
# Prevent ratelimiters from causing test failures
os.environ['INDIGO_BENCHMARK'] = '1'
###############################################################################
#
# Helpers
#
###############################################################################
def dirlist(d):
if d == None:
return [ "." ]
if type(d) == str:
return [ d ]
if type(d) != list:
raise Exception("'%s' is a bad dirlist" % d)
return d
def fselect(name, tops, subs, p=False):
tops = dirlist(tops)
subs = dirlist(subs)
for top in tops:
for sub in subs:
f = "%s/%s/%s" % (top, sub, name)
if os.path.exists(f):
return f
if p:
print "%s: not found" % f
if p == False:
fselect(name, tops, subs, p=True)
raise Exception("Could not find the '%s' binary. Search paths were %s:%s" % (name, tops, subs))
def system(command, die=False):
logging.debug("Running %s ", command)
rv = os.system(command)
if rv != 0 and die:
raise Exception(" [ %s ] FAILED: %d" % (command, rv))
return rv
def randomports(count):
return random.sample(xrange(30000, 32000), count)
def requirePathEnv(name):
p = os.getenv(name)
if p is None:
raise Exception("You must set the $%s variable." % name)
if not os.path.isdir(p):
raise Exception("The $%s variable does not point to a directory." % name)
return p
###############################################################################
IVS_BASEDIR = os.path.join(os.path.dirname(__file__), "..")
OFTEST_BASEDIR = requirePathEnv("OFTEST")
LOG_BASEDIR = "%s/testlogs/oftest" % (IVS_BASEDIR)
OFT = fselect("oft", OFTEST_BASEDIR, ".")
IVS_BINARY = fselect("ivs", IVS_BASEDIR, ["targets/ivs/build/gcc-local/bin"])
if sys.stderr.isatty():
RED = "\x1B[31m"
GREEN = "\x1B[32m"
NORM = "\x1B[39m"
else:
RED = ""
GREEN = ""
NORM = ""
class VethNetworkConfig(object):
def __init__(self, portCount):
self.caddr = "127.0.0.1"
self.cport = randomports(1)[0]
self.switchInterfaces = ["veth%d" % (i*2) for i in range(portCount)]
self.oftestInterfaces = ["%d@veth%d" % (i+1, i*2+1) for i in range(portCount)]
def listOFTests(spec=None, testfile=None, openflowVersion=None, testDir=None):
args = [ OFT, "--list-test-names" ]
if spec:
args.append(spec)
if testfile:
args.append("--test-file=%s" % testfile)
if openflowVersion:
args.append("-V%s" % openflowVersion)
if testDir:
args.append("--test-dir=%s" % testDir)
    stdout = subprocess.check_output(args)
    return stdout.splitlines()
def runOFTest(test, networkConfig, logDir, openflowVersion, testDir=None, oftArgs=None):
args = [ OFT,
"-H", str(networkConfig.caddr),
"-p", str(networkConfig.cport),
"--verbose",
"--log-file", "%s/oft.log" % logDir,
"--fail-skipped" ]
args.append("-V%s" % openflowVersion)
for iface in networkConfig.oftestInterfaces:
args.append('-i')
args.append(iface)
if testDir:
args.append("--test-dir=%s" % testDir)
if oftArgs:
args = args + oftArgs
args.append(test)
with open("%s/oft.stdout.log" % (logDir), "w") as logfile:
child = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=logfile,
stderr=subprocess.STDOUT)
if not child:
raise Exception("Failed to start: ", args)
child.wait()
    return child.returncode
class IVS(object):
def __init__(self, networkConfig, logDir, openflowVersion, ivsArgs=None):
self.networkConfig = networkConfig
self.logDir = logDir
self.openflowVersion = openflowVersion
self.ivsArgs = ivsArgs
self.child = None
def start(self):
args = [ IVS_BINARY,
"-c", "%s:%d" % (self.networkConfig.caddr, self.networkConfig.cport) ]
args.append("-V%s" % self.openflowVersion)
if self.ivsArgs:
args += self.ivsArgs
for iface in self.networkConfig.switchInterfaces:
            args.append("-i")
            args.append(iface)
with open("%s/ivs.log" % (self.logDir), "w") as logfile:
self.child = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=logfile,
stderr=subprocess.STDOUT)
if self.child is None:
raise Exception("Failed to start IVS")
def stop(self):
if self.child:
self.child.send_signal(signal.SIGTERM)
self.child.wait()
self.child = None
# BSN test system integration
class AbatTask(object):
def __init__(self):
        self.abatId = os.getenv("ABAT_ID")
assert(self.abatId)
self.abatTimestamp = os.getenv("ABAT_TIMESTAMP")
self.abatTask = os.getenv("ABAT_TASK")
self.abatWorkspace = "%s-%s" % (self.abatTimestamp, self.abatTask)
        self.bscBaseDir = requirePathEnv("BSC")
self.runIds = {}
def addTestcase(self, test, testLogDir):
logUrl = "http://%s/abat/%s/%s" % (platform.node(), self.abatWorkspace, testLogDir)
runId = os.popen("%s/build/add-testcase.py %s %s %s %s | tail -n 1" % (
self.bscBaseDir, self.abatId, test, "OFTest", logUrl)).read().rstrip()
self.runIds[test] = runId
def updateTestcase(self, test, result):
system("%s/build/update-testcase.py %s %s" % (
self.bscBaseDir, self.runIds[test], result))
class AutotestIVS(object):
def __init__(self, config):
self.config = config
self.results = []
if os.getenv("ABAT_TASK"):
print "Running in ABAT."
self.abat = AbatTask()
else:
self.abat = None
self.__setup()
def __setup(self):
self.oftests = listOFTests(spec=self.config.test_spec,
testfile=self.config.test_file,
openflowVersion=self.config.openflow_version,
testDir=self.config.test_dir)
def runTests(self):
results = { 'FAILED' : [], 'PASSED' : [] }
for test in self.oftests:
result = self.runTest(test)
results[result].append(test)
print
print "%d PASSED, %d FAILED." % (len(results['PASSED']), len(results['FAILED'])),
if results['FAILED']:
print
print "Failing tests:"
for test in results['FAILED']:
print test
self.outputResultXml()
def runTest(self, test):
if self.config.test_prefix:
testName = "%s.%s" % (self.config.test_prefix, test)
elif self.config.openflow_version == "1.3":
testName = "of13.%s" % test
else:
testName = test
testLogDir = "%s/%s" % (LOG_BASEDIR, testName)
system("mkdir -p %s" % (testLogDir))
sys.stdout.write("Running %s ... " % testName)
sys.stdout.flush()
if self.abat:
self.abat.addTestcase(testName, testLogDir)
networkConfig = VethNetworkConfig(8)
ivs = IVS(networkConfig, testLogDir, self.config.openflow_version, self.config.ivs_args)
ivs.start()
rv = runOFTest(test, networkConfig, testLogDir, self.config.openflow_version,
self.config.test_dir, self.config.oft_args)
ivs.stop()
if rv == 0:
result = 'PASSED'
sys.stdout.write(GREEN + "OK" + NORM + "\n")
else:
result = 'FAILED'
sys.stdout.write(RED + "FAIL" + NORM + "\n")
print "Test logs in %s" % testLogDir
if self.abat:
self.abat.updateTestcase(testName, result)
self.updateResultXml(testName, result, testLogDir)
return result
def updateResultXml(self, testName, result, logDir):
self.results.append((testName, result, logDir))
def outputResultXml(self):
if not self.config.xml:
return
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from xml.dom import minidom
root = Element("testsuite", { 'tests': str(len(self.results)) })
for name, result, logDir in self.results:
def readLog(name):
return file(logDir + "/" + name).read()
classname, testname = name.rsplit(".", 1)
testcase = SubElement(root, 'testcase', { 'classname': classname, 'name': testname })
if result == 'FAILED':
failure = SubElement(testcase, 'failure', { 'type': 'Failure' })
failure.text = re.search(r'-{70}(.*?)-{70}', readLog("oft.stdout.log"), re.DOTALL).group(1).strip()
system_out = SubElement(testcase, 'system-out')
system_out.text = readLog("oft.log")
system_err = SubElement(testcase, 'system-err')
system_err.text = readLog("ivs.log")
with open(self.config.xml, 'w') as f:
f.write(minidom.parseString(tostring(root)).toprettyxml(indent=" "))
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser(description="",
epilog=_help,
formatter_class=argparse.RawDescriptionHelpFormatter)
ap.add_argument("-T", "--test-spec", help="OFTest test specification", default=None)
ap.add_argument("-f", "--test-file", help="OFTest test file", default=None)
ap.add_argument("--ivs-args", action="append", help="Additional arguments passed to IVS.")
ap.add_argument("--oft-args", action="append", help="Additional arguments passed to oft.")
ap.add_argument("--log-base-dir", help="Set the log base directory.", default=None)
ap.add_argument("-V", "--openflow-version", help="OpenFlow version (1.0, 1.3)", default="1.0")
ap.add_argument("--test-dir", help="Directory containing tests")
ap.add_argument("--test-prefix", help="Prefix to use when reporting results")
ap.add_argument("--xml", help="Write a JUnit XML result file")
config = ap.parse_args()
if config.log_base_dir:
LOG_BASEDIR = config.log_base_dir
if not (config.test_spec or config.test_file):
sys.exit("Must specify at least one of --test-spec or --test-file")
a = AutotestIVS(config)
a.runTests()
|
floodlight/ivs
|
build/oftest.py
|
Python
|
epl-1.0
| 13,000
| 0.006692
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.config_store', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## file-config.h (module 'config-store'): ns3::FileConfig [class]
module.add_class('FileConfig', allow_subclassing=True)
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore [class]
module.add_class('GtkConfigStore')
## file-config.h (module 'config-store'): ns3::NoneFileConfig [class]
module.add_class('NoneFileConfig', parent=root_module['ns3::FileConfig'])
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## config-store.h (module 'config-store'): ns3::ConfigStore [class]
module.add_class('ConfigStore', parent=root_module['ns3::ObjectBase'])
## config-store.h (module 'config-store'): ns3::ConfigStore::Mode [enumeration]
module.add_enum('Mode', ['LOAD', 'SAVE', 'NONE'], outer_class=root_module['ns3::ConfigStore'])
## config-store.h (module 'config-store'): ns3::ConfigStore::FileFormat [enumeration]
module.add_enum('FileFormat', ['XML', 'RAW_TEXT'], outer_class=root_module['ns3::ConfigStore'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3FileConfig_methods(root_module, root_module['ns3::FileConfig'])
register_Ns3GtkConfigStore_methods(root_module, root_module['ns3::GtkConfigStore'])
register_Ns3NoneFileConfig_methods(root_module, root_module['ns3::NoneFileConfig'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3ConfigStore_methods(root_module, root_module['ns3::ConfigStore'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3FileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig(ns3::FileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): void ns3::FileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3GtkConfigStore_methods(root_module, cls):
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore(ns3::GtkConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GtkConfigStore const &', 'arg0')])
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore() [constructor]
cls.add_constructor([])
## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
return
def register_Ns3NoneFileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig(ns3::NoneFileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NoneFileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_virtual=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3ConfigStore_methods(root_module, cls):
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore(ns3::ConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConfigStore const &', 'arg0')])
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore() [constructor]
cls.add_constructor([])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
## config-store.h (module 'config-store'): ns3::TypeId ns3::ConfigStore::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## config-store.h (module 'config-store'): static ns3::TypeId ns3::ConfigStore::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFileFormat(ns3::ConfigStore::FileFormat format) [member function]
cls.add_method('SetFileFormat',
'void',
[param('ns3::ConfigStore::FileFormat', 'format')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetMode(ns3::ConfigStore::Mode mode) [member function]
cls.add_method('SetMode',
'void',
[param('ns3::ConfigStore::Mode', 'mode')])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
|
JBonsink/GSOC-2013
|
tools/ns-allinone-3.14.1/ns-3.14.1/src/config-store/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-3.0
| 54,535
| 0.013588
|
from __future__ import unicode_literals, print_function
from django.urls import reverse
from rest_framework import status
from mezzanine.blog.models import BlogPost as Post
from tests.utils import TestCase
class TestPostViewSet(TestCase):
"""
Test the API resources for blog posts (read and write)
"""
def setUp(self):
"""
        Set up the tests.
        Create some published and draft blog posts for API retrieval testing.
"""
super(TestPostViewSet, self).setUp()
# Note for using status:
# from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
# status=CONTENT_STATUS_PUBLISHED
self.post_draft = Post.objects.create(
title="Draft Post Title",
content="Draft Content",
status=1,
user=self.user)
self.post_published = Post.objects.create(
title="Published Post Title",
content="Published Content",
publish_date='2016-01-01T00:00Z',
user=self.user)
def tearDown(self):
"""
Clean up after the tests
"""
super(TestPostViewSet, self).tearDown()
self.post_draft.delete()
self.post_published.delete()
def test_list_published_posts(self):
"""
Test API list all published blog posts
"""
url = reverse('blogpost-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response['Content-type'], 'application/json')
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data['results'][0]['title'], self.post_published.title)
def test_retrieve_published_post(self):
"""
Test API retrieve the published blog post that we created earlier
"""
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['title'], self.post_published.title)
def test_retrieve_draft_post(self):
"""
Test that retrieving a draft post fails since the API only allows read access to published posts
"""
url = '/api/posts/{}'.format(self.post_draft.pk)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_create_as_superuser_token(self):
"""
Test API POST CREATE whilst authenticated via OAuth2 as a superuser
"""
# Note: we do not directly provide user here, as API should automatically get and
# authenticate current user as the author
post_data = {'title': 'title1', 'content': 'content1', 'publish_date': '2016-01-01T00:00Z',
'categories': 'Machine Learning,Statistics'}
url = '/api/posts'
response = self.client.post(url, post_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Post.objects.get(pk=response.data['id']).user, self.superuser)
self.assertEqual(Post.objects.get(pk=response.data['id']).title, post_data['title'])
self.assertEqual(Post.objects.get(pk=response.data['id']).content, post_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=response.data['id'])),
post_data['categories'])
def test_create_as_superuser(self):
"""
Test API POST CREATE whilst authenticated as a superuser
"""
post_data = {'title': 'title2', 'content': 'content2', 'publish_date': '2016-01-01T00:00Z',
'categories': 'Machine Learning'}
url = '/api/posts'
self.client.force_authenticate(user=self.superuser)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Post.objects.get(pk=response.data['id']).user, self.superuser)
self.assertEqual(Post.objects.get(pk=response.data['id']).title, post_data['title'])
self.assertEqual(Post.objects.get(pk=response.data['id']).content, post_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=response.data['id'])),
post_data['categories'])
def test_create_as_user(self):
"""
Test API POST CREATE whilst authenticated as a standard user
"""
post_data = {'title': 'a', 'content': 'b', 'publish_date': '2016-01-01T00:00Z'}
url = '/api/posts'
self.client.force_authenticate(user=self.user)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_as_guest(self):
"""
Test API POST CREATE whilst unauthenticated as a guest
"""
post_data = {'title': 'a', 'content': 'b', 'publish_date': '2016-01-01T00:00Z'}
url = '/api/posts'
self.client.force_authenticate(user=None)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_as_superuser_token(self):
"""
Test API PUT UPDATE whilst authenticated via OAuth2 as a superuser
"""
put_data = {'title': 'a', 'content': 'b', 'categories': 'cat1,cat2'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, put_data['title'])
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, put_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
put_data['categories'])
def test_update_as_user(self):
"""
Test API PUT UPDATE whilst authenticated as a standard user
"""
put_data = {'title': 'a', 'content': 'b'}
url = '/api/posts/{}'.format(self.post_published.pk)
self.client.force_authenticate(user=self.user)
response = self.client.put(url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_update_as_guest(self):
"""
Test API PUT UPDATE whilst unauthenticated as a guest
"""
put_data = {'title': 'a', 'content': 'b'}
url = '/api/posts/{}'.format(self.post_published.pk)
self.client.force_authenticate(user=None)
response = self.client.put(url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_categories_unchanged(self):
"""
Test API PUT UPDATE title and test categories remain unchanged
whilst authenticated via OAuth2 as a superuser
"""
original_content = Post.objects.get(pk=self.post_published.pk).content
original_categories = self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk))
put_data = {'title': 'updated title'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, put_data['title'])
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, original_content)
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
original_categories)
def test_update_categories_disassociate_one(self):
"""
Test API PUT UPDATE disassociate category 'cat2'
whilst authenticated via OAuth2 as a superuser
"""
original_title = Post.objects.get(pk=self.post_published.pk).title
original_content = Post.objects.get(pk=self.post_published.pk).content
put_data = {'categories': 'cat1'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
print(response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, original_title)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, original_content)
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
put_data['categories'])
def test_update_categories_associate_three(self):
"""
Test API PUT UPDATE associate three new categories
whilst authenticated via OAuth2 as a superuser
"""
original_title = Post.objects.get(pk=self.post_published.pk).title
original_content = Post.objects.get(pk=self.post_published.pk).content
put_data = {'categories': 'cat1,cat2,cat3,cat4'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
print(response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, original_title)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, original_content)
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
put_data['categories'])
def test_update_categories_disassociate_all(self):
"""
Test API PUT UPDATE disassociate all categories
whilst authenticated via OAuth2 as a superuser
"""
original_title = Post.objects.get(pk=self.post_published.pk).title
original_content = Post.objects.get(pk=self.post_published.pk).content
put_data = {'categories': ''}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, original_title)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, original_content)
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
put_data['categories'])
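# Note on the helper used above (hypothetical sketch; the real
# get_categories_as_delim_str lives in tests.utils.TestCase): the assertions only
# rely on it returning the post's category titles as a comma-delimited string,
# roughly equivalent to:
#
#     def get_categories_as_delim_str(self, post):
#         return ','.join(category.title for category in post.categories.all())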
|
gcushen/mezzanine-api
|
tests/test_post.py
|
Python
|
mit
| 10,996
| 0.003365
|
from typing import (
Any,
List,
)
from pcs import resource
from pcs.cli.common.parse_args import InputModifiers
from pcs.cli.common.routing import (
CliCmdInterface,
create_router,
)
def resource_defaults_cmd(parent_cmd: List[str]) -> CliCmdInterface:
def _get_router(
lib: Any, argv: List[str], modifiers: InputModifiers
) -> None:
"""
Options:
* -f - CIB file
* --force - allow unknown options
"""
if argv and "=" in argv[0]:
# DEPRECATED legacy command
return resource.resource_defaults_legacy_cmd(
lib, argv, modifiers, deprecated_syntax_used=True
)
router = create_router(
{
"config": resource.resource_defaults_config_cmd,
"set": create_router(
{
"create": resource.resource_defaults_set_create_cmd,
"delete": resource.resource_defaults_set_remove_cmd,
"remove": resource.resource_defaults_set_remove_cmd,
"update": resource.resource_defaults_set_update_cmd,
},
parent_cmd + ["set"],
),
"update": resource.resource_defaults_legacy_cmd,
},
parent_cmd,
default_cmd="config",
)
return router(lib, argv, modifiers)
return _get_router
def resource_op_defaults_cmd(parent_cmd: List[str]) -> CliCmdInterface:
def _get_router(
lib: Any, argv: List[str], modifiers: InputModifiers
) -> None:
"""
Options:
* -f - CIB file
* --force - allow unknown options
"""
if argv and "=" in argv[0]:
# DEPRECATED legacy command
return resource.resource_op_defaults_legacy_cmd(
lib, argv, modifiers, deprecated_syntax_used=True
)
router = create_router(
{
"config": resource.resource_op_defaults_config_cmd,
"set": create_router(
{
"create": resource.resource_op_defaults_set_create_cmd,
"delete": resource.resource_op_defaults_set_remove_cmd,
"remove": resource.resource_op_defaults_set_remove_cmd,
"update": resource.resource_op_defaults_set_update_cmd,
},
parent_cmd + ["set"],
),
"update": resource.resource_op_defaults_legacy_cmd,
},
parent_cmd,
default_cmd="config",
)
return router(lib, argv, modifiers)
return _get_router
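# Hypothetical wiring sketch (not taken from the pcs sources): both factories
# return CliCmdInterface callables, so a parent router could mount them like:
#
#     resource_router = create_router(
#         {
#             "defaults": resource_defaults_cmd(["resource", "defaults"]),
#             "op": create_router(
#                 {"defaults": resource_op_defaults_cmd(["resource", "op", "defaults"])},
#                 ["resource", "op"],
#             ),
#         },
#         ["resource"],
#     )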
|
tomjelinek/pcs
|
pcs/cli/routing/resource_stonith_common.py
|
Python
|
gpl-2.0
| 2,770
| 0
|
#### PATTERN | EN ##################################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# English linguistic tools using fast regular expressions.
from inflect import \
article, referenced, DEFINITE, INDEFINITE, \
pluralize, singularize, NOUN, VERB, ADJECTIVE, \
conjugate, lemma, lexeme, tenses, VERBS, \
grade, comparative, superlative, COMPARATIVE, SUPERLATIVE, \
predicative, attributive, \
INFINITIVE, PRESENT, PAST, FUTURE, \
FIRST, SECOND, THIRD, \
SINGULAR, PLURAL, SG, PL, \
PROGRESSIVE, \
PARTICIPLE
from inflect.quantify import \
number, numerals, quantify, reflect
from inflect.spelling import \
suggest as spelling
from parser import tokenize, parse, tag
from parser.tree import Text, Sentence, Slice, Chunk, PNPChunk, Chink, Word, table
from parser.tree import SLASH, WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA, AND, OR
from parser.modality import mood, INDICATIVE, IMPERATIVE, CONDITIONAL, SUBJUNCTIVE
from parser.modality import modality, EPISTEMIC
from parser.modality import negated
from parser.sentiment import sentiment, polarity, subjectivity, positive
from parser.sentiment import NOUN, VERB, ADJECTIVE, ADVERB
import wordnet
import wordlist
def parsetree(s, *args, **kwargs):
""" Returns a parsed Text from the given string.
"""
return Text(parse(s, *args, **kwargs))
def split(s, token=[WORD, POS, CHUNK, PNP]):
""" Returns a parsed Text from the given parsed string.
"""
return Text(s, token)
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4):
""" Pretty-prints the output of parse() as a table with outlined columns.
Alternatively, you can supply a Text or Sentence object.
"""
if isinstance(string, basestring):
print "\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)])
if isinstance(string, Text):
print "\n\n".join([table(sentence, fill=column) for sentence in string])
if isinstance(string, Sentence):
print table(string, fill=column)
def ngrams(string, n=3, continuous=False):
""" Returns a list of n-grams (tuples of n successive words) from the given string.
Alternatively, you can supply a Text or Sentence object.
With continuous=False, n-grams will not run over sentence markers (i.e., .!?).
"""
def strip_period(s, punctuation=set(".:;,!?()[]'\"")):
return [w for w in s if (isinstance(w, Word) and w.string or w) not in punctuation]
if n <= 0:
return []
if isinstance(string, basestring):
s = [strip_period(s.split(" ")) for s in tokenize(string)]
if isinstance(string, Sentence):
s = [strip_period(string)]
if isinstance(string, Text):
s = [strip_period(s) for s in string]
if continuous:
s = [sum(s, [])]
g = []
for s in s:
#s = [None] + s + [None]
g.extend([tuple(s[i:i+n]) for i in range(len(s)-n+1)])
return g
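# Illustrative usage (added for clarity; the exact tokens depend on parser.tokenize):
#
# >>> ngrams("The cat sat on the mat.", n=2)
# [('The', 'cat'), ('cat', 'sat'), ('sat', 'on'), ('on', 'the'), ('the', 'mat')]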
|
decebel/dataAtom_alpha
|
bin/plug/py/external/pattern/text/en/__init__.py
|
Python
|
apache-2.0
| 3,292
| 0.008202
|
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
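# Illustrative extension (not part of the original example): the same warm_start
# loop can track the OOB error of the already-imported ExtraTreesClassifier,
# provided bootstrap sampling is enabled (it is off by default for extra-trees):
#
# extra_clf = ExtraTreesClassifier(warm_start=True, oob_score=True, bootstrap=True,
#                                  max_features="sqrt", random_state=RANDOM_STATE)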
|
beepee14/scikit-learn
|
examples/ensemble/plot_ensemble_oob.py
|
Python
|
bsd-3-clause
| 3,265
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import timeit
import unittest
from haystack.mappings import folder
from haystack.mappings.base import AMemoryMapping
from haystack.mappings.base import MemoryHandler
from haystack.mappings.file import LocalMemoryMapping
import haystack.reverse.enumerators
import haystack.reverse.matchers
from haystack.reverse import searchers
from test.testfiles import zeus_856_svchost_exe
from . import test_pattern
log = logging.getLogger('test_pointerfinder')
class TestPointer(test_pattern.SignatureTests):
def setUp(self):
super(TestPointer, self).setUp()
self.mmap, self.values = self._make_mmap_with_values(self.seq)
self.name = 'test_dump_1'
self.feedback = searchers.NoFeedback()
def _make_mmap_with_values(self, intervals, struct_offset=None):
"""
        Make a memory map with a fake structure of pointer patterns inside.
        Return the memory map and the pointer values it contains.
        :param intervals:
        :param struct_offset:
        :return: (mmap, values)
"""
# template of a memory map metadata
self._mstart = 0x0c00000
self._mlength = 4096 # end at (0x0c01000)
# could be 8, it doesn't really matter
self.word_size = self.target.get_word_size()
if struct_offset is not None:
self._struct_offset = struct_offset
else:
self._struct_offset = self.word_size*12 # 12, or any other aligned
mmap,values = self._make_mmap(0x0c00000, 4096, self._struct_offset,
intervals, self.word_size)
# add a reference to mmap in mmap2
ammap2 = AMemoryMapping(0xff7dc000, 0xff7dc000+0x1000, '-rwx', 0, 0, 0, 0, 'test_mmap2')
ammap2.set_ctypes(self.target.get_target_ctypes())
mmap2 = LocalMemoryMapping.fromBytebuffer(ammap2, mmap.get_byte_buffer())
self._memory_handler = MemoryHandler([mmap, mmap2], self.target, 'test')
self.mmap2 = mmap2
return mmap, values
class TestPointerSearcher(TestPointer):
def test_iter(self):
matcher = haystack.reverse.matchers.PointerSearcher(self._memory_handler)
self.pointerSearcher = searchers.WordAlignedSearcher(self.mmap, matcher, self.feedback, self.word_size)
iters = [value for value in self.pointerSearcher]
values = self.pointerSearcher.search()
self.assertEqual(iters, values)
self.assertEqual(self.values, values)
self.assertEqual(self.values, iters)
class TestPointerEnumerator(TestPointer):
def test_iter(self):
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
self.pointerEnum = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap, matcher, self.feedback, self.word_size)
values = [value for offset, value in self.pointerEnum]
offsets = [offset for offset, value in self.pointerEnum]
values_2 = [value for offset, value in self.pointerEnum.search()]
offsets_2 = [offset for offset, value in self.pointerEnum.search()]
self.assertEqual(values, values_2)
self.assertEqual(offsets, offsets_2)
self.assertEqual(self.values, values)
self.assertEqual(self.values, values_2)
nsig = [self._mstart + self._struct_offset]
nsig.extend(self.seq)
indices = [i for i in self._accumulate(nsig)]
self.assertEqual(indices, offsets)
self.assertEqual(indices, offsets_2)
def test_iter_advanced(self):
"""test that pointers to other mappings are detected"""
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
self.pointerEnum1 = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap, matcher, self.feedback, self.word_size)
offsets1, values1 = zip(*self.pointerEnum1.search())
self.pointerEnum2 = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap2, matcher, self.feedback, self.word_size)
offsets2, values2 = zip(*self.pointerEnum2.search())
self.assertEqual(values1, values2)
self.assertEqual(len(values1), len(self.seq)+1)
class TestPointerEnumeratorReal(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._memory_handler = folder.load(zeus_856_svchost_exe.dumpname)
#cls._memory_handler = folder.load(putty_1_win7.dumpname)
cls._utils = cls._memory_handler.get_target_platform().get_target_ctypes_utils()
return
@classmethod
def tearDownClass(cls):
cls._utils = None
cls._memory_handler.reset_mappings()
cls._memory_handler = None
return
def setUp(self):
self._heap_finder = self._memory_handler.get_heap_finder()
return
def tearDown(self):
self._heap_finder = None
return
def _stats(self, heap_addrs):
# get the weight per mapping
mapdict = {}
for m in self._memory_handler.get_mappings():
mapdict[m.start] = 0
for addr in heap_addrs:
m = self._memory_handler.get_mapping_for_address(addr)
mapdict[m.start] += 1
res = [(v,k) for k,v, in mapdict.items()]
res.sort()
res.reverse()
print('Most used mappings:')
for cnt,s in res:
if cnt == 0:
continue
m = self._memory_handler.get_mapping_for_address(s)
print(cnt, m)
def test_pointer_enumerators(self):
"""
        Search pointer values in one HEAP.
:return:
"""
# prep the workers
dumpfilename = self._memory_handler.get_name()
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
walker = walkers[0]
heap_addr = walker.get_heap_address()
heap = walker.get_heap_mapping()
# create the enumerator on the whole mapping
enumerator1 = haystack.reverse.enumerators.WordAlignedEnumerator(heap, matcher, feedback, word_size)
# collect the pointers
if False:
###
ts1 = timeit.timeit(enumerator1.search, number=3)
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# ... do something ...
heap_enum = enumerator1.search()
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
###
else:
heap_enum = enumerator1.search()
ts1 = 0.0
heap_addrs1, heap_values1 = zip(*heap_enum)
print('WordAlignedEnumerator: %d pointers, timeit %0.2f' % (len(heap_addrs1), ts1))
self._stats(heap_addrs1)
def test_pointer_enumerators_allocated(self):
"""
        Search pointer values in allocated chunks from one HEAP.
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
heap_walker = walkers[0]
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.AllocatedWordAlignedEnumerator(heap_walker, matcher, feedback, word_size)
# collect the pointers
if False:
###
ts2 = timeit.timeit(enumerator2.search, number=3)
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# ... do something ...
heap_enum2 = enumerator2.search()
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
###
else:
heap_enum2 = enumerator2.search()
ts2 = 0.0
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('AllocatedWordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
self._stats(heap_addrs2)
def test_pointer_enumerators_all(self):
"""
        Search pointer values in all HEAPs.
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
all_heaps_addrs = []
for walker in walkers:
#if heap.start != 0x03360000:
# continue
heap = walker.get_heap_mapping()
log.debug('heap is %s', heap)
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.WordAlignedEnumerator(heap, matcher, feedback, word_size)
# collect the pointers
heap_enum2 = enumerator2.search()
ts2 = 0.0
if len(heap_enum2) == 0:
logging.debug('Heap %s has no pointers in allocated blocks', heap)
else:
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('WordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
all_heaps_addrs.extend(heap_addrs2)
##
if False:
print("Pointers:")
for k,v in heap_enum2:
print(hex(k), hex(v))
self._stats(all_heaps_addrs)
def test_pointer_enumerators_allocated_all(self):
"""
        Search pointer values in allocated chunks from all HEAPs.
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
all_heaps_addrs = []
for heap_walker in walkers:
#if heap.start != 0x03360000:
# continue
heap = heap_walker.get_heap_mapping()
log.debug('heap is %s', heap)
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.AllocatedWordAlignedEnumerator(heap_walker, matcher, feedback, word_size)
# collect the pointers
heap_enum2 = enumerator2.search()
ts2 = 0.0
if len(heap_enum2) == 0:
logging.debug('Heap %s has no pointers in allocated blocks', heap)
else:
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('AllocatedWordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
all_heaps_addrs.extend(heap_addrs2)
##
if False:
print("Pointers:")
for k,v in heap_enum2:
print(hex(k), hex(v))
print("Allocations:")
for addr, size in heap_walker.get_user_allocations():
print(hex(addr), '->', hex(addr+size), '(%x)'%size)
print("Free chunks:")
for addr, size in heap_walker.get_free_chunks():
print(hex(addr), '->', hex(addr+size), '(%x)'%size)
self._stats(all_heaps_addrs)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
# logging.getLogger("test_pointerfinder").setLevel(logging.DEBUG)
unittest.main()
|
trolldbois/python-haystack-reverse
|
test/haystack/reverse/test_pointerfinder.py
|
Python
|
gpl-3.0
| 12,279
| 0.003339
|
class hheap(dict):
@staticmethod
    def _parent(i):  # parent index computed with a bit shift (same applies below)
return (i-1)>>1
@staticmethod
def _left(i):
return (i<<1) + 1
@staticmethod
def _right(i):
return (i<<1) + 2
    '''
    Structure is the following:
    the heap and the dictionary both hold references to the same list
        [position, value, key]
    (optionally extended with a via-key by update_if_better), so updating an
    entry through either view keeps the other in sync.
    '''
def __init__(self):
self.heap = []
self.hLength = -1
dict.__init__(self)
def __setitem__(self,key,value):
if dict.__contains__(self,key):
item = dict.__getitem__(self,key)
item[1] = value
if item[1] < self.heap[self._parent(item[0])][1]:
self.heapup(item[0])
else:
self.heapdown(item[0])
else:
self.hLength += 1
self.heap.append([self.hLength,value,key])
dict.__setitem__(self,key,self.heap[-1])
self.heapup(self.hLength)
def __getitem__(self,key):
'''Get item retrieves the value of the given key '''
if dict.__contains__(self,key):
return dict.__getitem__(self,key)[1]
raise KeyError("Key does not exist")
def heapup(self,index):
''' Maintains the property of a heap by checking its parent, mostly used after insertion'''
parent = self._parent(index)
        if parent == -1:
return
if self.heap[index][1] < self.heap[parent][1]:
self._swap(index,parent)
return self.heapup(parent)
if self.heap[index][1] == self.heap[parent][1]:
if self.heap[index][2] < self.heap[parent][2]:
self._swap(index,parent)
return self.heapup(parent)
return
def heapdown(self,index=0):
''' Maintains the property of a heap by checking its children '''
leftChild = self._left(index)
rightChild = self._right(index)
last = len(self.heap)-1
if leftChild > last:
return
elif rightChild > last:
if self.heap[leftChild][1] < self.heap[index][1]:
self._swap(index,leftChild)
return self.heapdown(leftChild)
else:
if self.heap[rightChild][1] < self.heap[leftChild][1]:
min = rightChild
else:
min = leftChild
if self.heap[index][1] > self.heap[min][1]:
self._swap(index,min)
if self.heap[index][1] == self.heap[min][1]:
if self.heap[index][2] > self.heap[min][2]:
self._swap(index,min)
return self.heapdown(min)
def _swap(self, i, j):
"""swap the contents b/w indices i and j; update hash accordingly"""
#swap within the heap
self.heap[i][0],self.heap[j][0] = j,i
self.heap[i],self.heap[j] = self.heap[j],self.heap[i]
def pop(self):
        # pop the root (smallest value)
        # keep a reference to the entry before removing it
popped = self.heap[0]
#remove from dict and heap
dict.__delitem__(self,popped[2])
self._swap(0,self.hLength)
self.heap.pop()
self.heapdown()
self.hLength-=1
return popped
def update_if_better(self, key, newvalue,viakey=None):
"""update if newvalue is better than the current value for key
or insert if key is not here yet."""
if dict.__contains__(self,key):
self[key] = min(self[key],newvalue)
info = dict.__getitem__(self,key)
if self[key] == newvalue:
                if len(info) == 3:
info.append(viakey)
else:
info[3] = viakey
else:
self[key] = newvalue
def Display(self,arry):
#print arry
        if len(arry) == 4:
print arry[2]+" "+str(arry[1])+ " (via "+arry[3]+")"
else:
if arry[1] == float("+inf"):
print str(arry[2])+" "+"unreachable"
else:
print str(arry[2])+" "+str(arry[1])
# def GenerateItems(self):
# for x in self.heap:
# yield x
def __str__(self):
string = "{"
string += ', '.join(["'" + item[0]+ "'" + ": "+str(item[1][1]) for item in sorted(self.items(),key = lambda x: x[1][1])])
string +="}"
return string
__repr__ = __str__
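# Minimal usage sketch (added for illustration, not part of the original module):
# hheap is a dict-backed min-heap whose pop() yields the entry with the smallest
# value, breaking ties on the key -- the priority queue Dijkstra's algorithm needs.
if __name__ == '__main__':
    h = hheap()
    h['a'] = 3
    h['b'] = 3
    h['c'] = 1
    # relax an edge: 'b' becomes reachable via 'c' at cost 2
    h.update_if_better('b', 2, viakey='c')
    h.Display(h.pop())  # prints: c 1
    h.Display(h.pop())  # prints: b 2 (via c)
    h.Display(h.pop())  # prints: a 3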
|
Bedrock02/General-Coding
|
Search/Dijkstra/hheap.py
|
Python
|
mit
| 3,638
| 0.053051
|
import importlib
import os
import sys
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_version() -> str:
"""
Return version.
"""
sys.path.insert(0, here)
return importlib.import_module("a2wsgi").__version__
os.chdir(here)
os.system(f"poetry version {get_version()}")
os.system("git add a2wsgi/* pyproject.toml")
os.system(f'git commit -m "v{get_version()}"')
os.system("git push")
os.system("git tag v{0}".format(get_version()))
os.system("git push --tags")
|
abersheeran/a2wsgi
|
script/version.py
|
Python
|
apache-2.0
| 509
| 0
|