| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M ⌀) |
|---|---|---|---|---|
gangadharkadam/vervefrappe | refs/heads/v5.0 | frappe/website/doctype/contact_us_settings/contact_us_settings.py | 73 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ContactUsSettings(Document):
def on_update(self):
from frappe.website.render import clear_cache
clear_cache("contact")
|
zdary/intellij-community | refs/heads/master | python/testData/quickFixes/PyRemoveUnusedLocalQuickFixTest/removeChainedAssignmentStatementUnpackingFirstTarget.py | 9 |
def f():
a = <caret>unused, b = 42, 42
return a, b
|
tacaswell/pims | refs/heads/master | pims/tests/test_common.py | 1 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import tempfile
import zipfile
import sys
import random
import types
import unittest
import pickle
from io import BytesIO
import nose
import numpy as np
from numpy.testing import (assert_equal, assert_allclose)
from nose.tools import assert_true
import pims
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
def _skip_if_no_PyAV():
import pims.pyav_reader
if not pims.pyav_reader.available():
raise nose.SkipTest('PyAV not found. Skipping.')
def _skip_if_no_MoviePy():
import pims.moviepy_reader
if not pims.moviepy_reader.available():
raise nose.SkipTest('MoviePy not found. Skipping.')
def _skip_if_no_ImageIO():
import pims.imageio_reader
if not pims.imageio_reader.available():
raise nose.SkipTest('ImageIO not found. Skipping.')
def _skip_if_no_libtiff():
try:
import libtiff
except ImportError:
raise nose.SkipTest('libtiff not installed. Skipping.')
def _skip_if_no_tifffile():
try:
import tifffile
except ImportError:
raise nose.SkipTest('tifffile not installed. Skipping.')
def _skip_if_no_imread():
if pims.image_sequence.imread is None:
raise nose.SkipTest('ImageSequence requires either scipy, matplotlib or'
' scikit-image. Skipping.')
def _skip_if_no_skimage():
try:
import skimage
except ImportError:
raise nose.SkipTest('skimage not installed. Skipping.')
def _skip_if_no_PIL():
try:
from PIL import Image
except ImportError:
raise nose.SkipTest('PIL/Pillow not installed. Skipping.')
def assert_image_equal(actual, expected):
if np.issubdtype(actual.dtype, np.integer):
assert_equal(actual, expected)
else:
if np.issubdtype(expected.dtype, np.integer):
expected = expected/float(np.iinfo(expected.dtype).max)
assert_allclose(actual, expected, atol=1/256.)
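# Hedged usage sketch for assert_image_equal (values are illustrative):
# a float frame compares equal to an integer frame once the integer side
# is rescaled to [0, 1]:
#
#     raw = np.array([[0, 128, 255]], dtype='uint8')
#     assert_image_equal(raw / 255., raw)  # passes, within atol=1/256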
def save_dummy_png(filepath, filenames, shape):
from PIL import Image
if not os.path.isdir(filepath):
os.mkdir(filepath)
frames = []
for f in filenames:
dummy = np.random.randint(0, 255, shape).astype('uint8')
im = Image.fromarray(dummy)
im.save(os.path.join(filepath, f), 'png')
frames.append(dummy)
return frames
def clean_dummy_png(filepath, filenames):
for f in filenames:
os.remove(os.path.join(filepath, f))
if os.listdir(filepath) == []:
os.rmdir(filepath)
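# Illustrative round trip with the two helpers above (the path is
# hypothetical):
#
#     names = ['f1.png', 'f2.png']
#     frames = save_dummy_png('/tmp/pims_demo', names, (10, 11))
#     # ... exercise pims.ImageSequence('/tmp/pims_demo/*.png') ...
#     clean_dummy_png('/tmp/pims_demo', names)  # removes the files, then
#                                               # the directory once empty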
class _image_single(object):
def check_skip(self):
pass
def test_bool(self):
self.check_skip()
pass
def test_integer_attributes(self):
self.check_skip()
assert_equal(len(self.v.frame_shape), len(self.expected_shape))
self.assertTrue(isinstance(self.v.frame_shape[0], six.integer_types))
self.assertTrue(isinstance(self.v.frame_shape[1], six.integer_types))
self.assertTrue(isinstance(len(self.v), six.integer_types))
def test_shape(self):
self.check_skip()
assert_equal(self.v.frame_shape, self.expected_shape)
def test_count(self):
self.check_skip()
assert_equal(len(self.v), self.expected_len)
def test_repr(self):
self.check_skip()
# simple smoke test, values not checked
repr(self.v)
class _deprecated_functions(object):
def test_dtype_conversion(self):
self.check_skip()
v8 = self.klass(self.filename, dtype='uint8', **self.kwargs)
v16 = self.klass(self.filename, dtype='uint16', **self.kwargs)
type8 = v8[0].dtype
type16 = v16[0].dtype
self.assertEqual(type8, np.uint8)
self.assertEqual(type16, np.uint16)
def test_process_func(self):
self.check_skip()
# Use a trivial identity function to verify the process_func exists.
f = lambda x: x
self.klass(self.filename, process_func=f, **self.kwargs)
# Also, it should be the second positional arg for each class.
# This is verified more directly in later tests, too.
self.klass(self.filename, f, **self.kwargs)
def test_inversion_process_func(self):
self.check_skip()
def invert(image):
if np.issubdtype(image.dtype, np.integer):
max_value = np.iinfo(image.dtype).max
image = image ^ max_value
else:
image = 1 - image
return image
v_raw = self.klass(self.filename, **self.kwargs)
v = self.klass(self.filename, invert, **self.kwargs)
assert_image_equal(v[0], invert(v_raw[0]))
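# Hedged aside on the invert() helper above: for unsigned integer frames,
# XOR against the dtype maximum flips every bit, which is the same as
# subtracting from the maximum:
#
#     200 ^ 255 == 55 == 255 - 200  # uint8 example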
def box(letter):
return pims.Frame(np.array(letter))
def assert_letters_equal(actual, expected):
for actual_, expected_ in zip(actual, expected):
# This contrived reader has weird shape behavior,
# but that's not what I'm testing here.
assert_equal(actual_.reshape((1, 1)), box(expected_).reshape((1, 1)))
def compare_slice_to_list(actual, expected):
assert_letters_equal(actual, expected)
# test lengths
actual_len = len(actual)
assert_equal(actual_len, len(expected))
indices = list(range(len(actual)))
for i in indices:
# test positive indexing
assert_letters_equal(actual[i], expected[i])
# test negative indexing
assert_letters_equal(actual[-i + 1], expected[-i + 1])
# in reverse order
for i in indices[::-1]:
assert_letters_equal(actual[i], expected[i])
assert_letters_equal(actual[-i + 1], expected[-i + 1])
# in shuffled order (using a consistent random seed)
r = random.Random(5)
r.shuffle(indices)
for i in indices:
assert_letters_equal(actual[i], expected[i])
assert_letters_equal(actual[-i + 1], expected[-i + 1])
# test list indexing
some_indices = [r.choice(indices) for _ in range(2)]
assert_letters_equal(actual[some_indices],
np.array(expected)[some_indices])
# mixing positive and negative indices
some_indices = [r.choice(indices + [-i-1 for i in indices])
for _ in range(2)]
assert_letters_equal(actual[some_indices],
np.array(expected)[some_indices])
# test slices
assert_letters_equal(actual[::2], expected[::2])
assert_letters_equal(actual[1::2], expected[1::2])
assert_letters_equal(actual[::3], expected[::3])
assert_letters_equal(actual[1:], expected[1:])
assert_letters_equal(actual[:], expected[:])
assert_letters_equal(actual[:-1], expected[:-1])
class TestRecursiveSlicing(unittest.TestCase):
def setUp(self):
_skip_if_no_imread()
class DemoReader(pims.ImageSequence):
def imread(self, filename, **kwargs):
return np.array([[filename]])
self.v = DemoReader(list('abcdefghij'))
def test_slice_of_slice(self):
slice1 = self.v[4:]
compare_slice_to_list(slice1, list('efghij'))
slice2 = slice1[-3:]
compare_slice_to_list(slice2, list('hij'))
slice1a = self.v[[3, 4, 5, 6, 7, 8, 9]]
compare_slice_to_list(slice1a, list('defghij'))
slice2a = slice1a[::2]
compare_slice_to_list(slice2a, list('dfhj'))
slice2b = slice1a[::-1]
compare_slice_to_list(slice2b, list('jihgfed'))
slice2c = slice1a[::-2]
compare_slice_to_list(slice2c, list('jhfd'))
print('slice2d')
slice2d = slice1a[:0:-1]
compare_slice_to_list(slice2d, list('jihgfe'))
slice2e = slice1a[-1:1:-1]
compare_slice_to_list(slice2e, list('jihgf'))
slice2f = slice1a[-2:1:-1]
compare_slice_to_list(slice2f, list('ihgf'))
slice2g = slice1a[::-3]
compare_slice_to_list(slice2g, list('jgd'))
slice2h = slice1a[[5, 6, 2, -1, 3, 3, 3, 0]]
compare_slice_to_list(slice2h, list('ijfjgggd'))
def test_slice_of_slice_of_slice(self):
slice1 = self.v[4:]
compare_slice_to_list(slice1, list('efghij'))
slice2 = slice1[1:-1]
compare_slice_to_list(slice2, list('fghi'))
slice2a = slice1[[2, 3, 4]]
compare_slice_to_list(slice2a, list('ghi'))
slice3 = slice2[1::2]
compare_slice_to_list(slice3, list('gi'))
def test_slice_of_slice_of_slice_of_slice(self):
# Take the red pill. It's slices all the way down!
slice1 = self.v[4:]
compare_slice_to_list(slice1, list('efghij'))
slice2 = slice1[1:-1]
compare_slice_to_list(slice2, list('fghi'))
slice3 = slice2[1:]
compare_slice_to_list(slice3, list('ghi'))
slice4 = slice3[1:]
compare_slice_to_list(slice4, list('hi'))
# Give me another!
slice1 = self.v[2:]
compare_slice_to_list(slice1, list('cdefghij'))
slice2 = slice1[0::2]
compare_slice_to_list(slice2, list('cegi'))
slice3 = slice2[:]
compare_slice_to_list(slice3, list('cegi'))
print('define slice4')
slice4 = slice3[:-1]
print('compare slice4')
compare_slice_to_list(slice4, list('ceg'))
print('define slice4a')
slice4a = slice3[::-1]
print('compare slice4a')
compare_slice_to_list(slice4a, list('igec'))
def test_slice_with_generator(self):
slice1 = self.v[1:]
compare_slice_to_list(slice1, list('bcdefghij'))
slice2 = slice1[(i for i in range(2,5))]
assert_letters_equal(slice2, list('def'))
assert_true(isinstance(slice2, types.GeneratorType))
def _rescale(img):
print(type(img))
return (img - img.min()) / img.ptp()
def _color_channel(img, channel):
if img.ndim == 3:
return img[:, :, channel]
else:
return img
class _image_series(_image_single):
def test_iterator(self):
self.check_skip()
i = iter(self.v)
assert_image_equal(next(i), self.frame0)
assert_image_equal(next(i), self.frame1)
def test_getting_slice(self):
self.check_skip()
tmp = list(self.v[0:2])
frame0, frame1 = tmp
assert_image_equal(frame0, self.frame0)
assert_image_equal(frame1, self.frame1)
def test_slice_of_slice(self):
# More thorough recursive slicing tests, making use of more than
# the two frames available for these tests, are elsewhere:
# see test_recursive_slicing.
self.check_skip()
tmp = self.v[0:2]
tmp1 = tmp[1:]
frame1 = tmp1[0]
assert_image_equal(frame1, self.frame1)
# Do the same thing again, show that the generators are not dead.
tmp1 = tmp[1:]
frame1 = tmp1[0]
assert_image_equal(frame1, self.frame1)
frame0 = tmp[0]
assert_image_equal(frame0, self.frame0)
# Show that we can listify the slice twice.
frame0, frame1 = list(tmp)
assert_image_equal(frame0, self.frame0)
assert_image_equal(frame1, self.frame1)
frame0, frame1 = list(tmp)
assert_image_equal(frame0, self.frame0)
assert_image_equal(frame1, self.frame1)
def test_pipeline_simple(self):
rescale = pims.pipeline(_rescale)
rescaled_v = rescale(self.v[:1])
assert_image_equal(rescaled_v[0], _rescale(self.frame0))
def test_pipeline_with_args(self):
color_channel = pims.pipeline(_color_channel)
red = color_channel(self.v, 0)
green = color_channel(self.v, 1)
assert_image_equal(red[0], _color_channel(self.frame0, 0))
assert_image_equal(green[0], _color_channel(self.frame0, 1))
# Multiple pipelines backed by the same data are indep,
# so this call to red is unaffected by green above.
assert_image_equal(red[0], _color_channel(self.frame0, 0))
def test_composed_pipelines(self):
color_channel = pims.pipeline(_color_channel)
rescale = pims.pipeline(_rescale)
composed = rescale(color_channel(self.v, 0))
expected = _rescale(_color_channel(self.v[0], 0))
assert_image_equal(composed[0], expected)
def test_getting_single_frame(self):
self.check_skip()
assert_image_equal(self.v[0], self.frame0)
assert_image_equal(self.v[0], self.frame0)
assert_image_equal(self.v[1], self.frame1)
assert_image_equal(self.v[1], self.frame1)
def test_getting_list(self):
self.check_skip()
actual = list(self.v[[1, 0, 0, 1, 1]])
expected = [self.frame1, self.frame0, self.frame0, self.frame1,
self.frame1]
[assert_image_equal(a, b) for a, b in zip(actual, expected)]
def test_frame_number_present(self):
self.check_skip()
for frame_no in [0, 1, 2, 1]:
self.assertTrue(hasattr(self.v[frame_no], 'frame_no'))
not_none = self.v[frame_no].frame_no is not None
self.assertTrue(not_none)
def test_frame_number_accurate(self):
self.check_skip()
for frame_no in [0, 1, 2, 1]:
self.assertEqual(self.v[frame_no].frame_no, frame_no)
def test_simple_negative_index(self):
self.check_skip()
self.v[-1]
list(self.v[[0, -1]])
class _image_rgb(_image_single):
# Only include these tests for 2D RGB files.
def test_greyscale_process_func(self):
self.check_skip()
def greyscale(image):
assert image.ndim == 3
image = image[:, :, 0]
assert image.ndim == 2
return image
v_raw = self.klass(self.filename, **self.kwargs)
v = self.klass(self.filename, greyscale, **self.kwargs)
assert_image_equal(v[0], greyscale(v_raw[0]))
def test_as_grey(self):
self.check_skip()
v = self.klass(self.filename, as_grey=True, **self.kwargs)
ndim = v[0].ndim
self.assertEqual(ndim, 2)
class TestVideo_PyAV(_image_series, _image_rgb, _deprecated_functions,
unittest.TestCase):
def check_skip(self):
_skip_if_no_PyAV()
def setUp(self):
_skip_if_no_PyAV()
self.filename = os.path.join(path, 'bulk-water.mov')
self.frame0 = np.load(os.path.join(path, 'bulk-water_frame0.npy'))
self.frame1 = np.load(os.path.join(path, 'bulk-water_frame1.npy'))
self.klass = pims.PyAVVideoReader
self.kwargs = dict()
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = (640, 424, 3) # (x, y), wrong convention?
self.expected_len = 480
class TestVideo_ImageIO(_image_series, unittest.TestCase):
def check_skip(self):
_skip_if_no_ImageIO()
def setUp(self):
_skip_if_no_ImageIO()
self.filename = os.path.join(path, 'bulk-water.mov')
self.frame0 = np.load(os.path.join(path, 'bulk-water_frame0.npy'))
self.frame1 = np.load(os.path.join(path, 'bulk-water_frame1.npy'))
self.klass = pims.ImageIOReader
self.kwargs = dict()
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = (424, 640, 3)
self.expected_len = 480
class TestVideo_MoviePy(_image_series, unittest.TestCase):
def check_skip(self):
_skip_if_no_MoviePy()
def setUp(self):
_skip_if_no_MoviePy()
self.filename = os.path.join(path, 'bulk-water.mov')
self.frame0 = np.load(os.path.join(path, 'bulk-water_frame0.npy'))
self.frame1 = np.load(os.path.join(path, 'bulk-water_frame1.npy'))
self.klass = pims.MoviePyReader
self.kwargs = dict()
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = (424, 640, 3)
self.expected_len = 480
class _tiff_image_series(_image_series, _deprecated_functions):
def test_metadata(self):
m = self.v[0].metadata
if sys.version_info.major < 3:
pkl_path = os.path.join(path, 'stuck_metadata_py2.pkl')
else:
pkl_path = os.path.join(path, 'stuck_metadata_py3.pkl')
with open(pkl_path, 'rb') as p:
d = pickle.load(p)
assert_equal(m, d)
class TestTiffStack_libtiff(_tiff_image_series, _deprecated_functions,
unittest.TestCase):
def check_skip(self):
_skip_if_no_libtiff()
def setUp(self):
_skip_if_no_libtiff()
self.filename = os.path.join(path, 'stuck.tif')
self.frame0 = np.load(os.path.join(path, 'stuck_frame0.npy'))
self.frame1 = np.load(os.path.join(path, 'stuck_frame1.npy'))
self.klass = pims.TiffStack_libtiff
self.kwargs = dict()
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = (512, 512)
self.expected_len = 5
class TestImageSequenceWithPIL(_image_series, _deprecated_functions,
unittest.TestCase):
def setUp(self):
_skip_if_no_skimage()
self.filepath = os.path.join(path, 'image_sequence')
self.filenames = ['T76S3F00001.png', 'T76S3F00002.png',
'T76S3F00003.png', 'T76S3F00004.png',
'T76S3F00005.png']
shape = (10, 11)
frames = save_dummy_png(self.filepath, self.filenames, shape)
self.filename = os.path.join(self.filepath, '*.png')
self.frame0 = frames[0]
self.frame1 = frames[1]
self.kwargs = dict(plugin='pil')
self.klass = pims.ImageSequence
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = shape
self.expected_len = 5
self.tempdir = tempfile.mkdtemp()
self.tempfile = os.path.join(self.tempdir, 'test.zip')
with zipfile.ZipFile(self.tempfile, 'w') as archive:
for fn in self.filenames:
archive.write(os.path.join(self.filepath, fn))
def test_bad_path_raises(self):
raises = lambda: pims.ImageSequence('this/path/does/not/exist/*.jpg')
self.assertRaises(IOError, raises)
def test_zipfile(self):
pims.ImageSequence(self.tempfile)[0]
def tearDown(self):
clean_dummy_png(self.filepath, self.filenames)
os.remove(self.tempfile)
os.rmdir(self.tempdir)
class TestImageSequenceWithMPL(_image_series, _deprecated_functions,
unittest.TestCase):
def setUp(self):
_skip_if_no_skimage()
self.filepath = os.path.join(path, 'image_sequence')
self.filenames = ['T76S3F00001.png', 'T76S3F00002.png',
'T76S3F00003.png', 'T76S3F00004.png',
'T76S3F00005.png']
shape = (10, 11)
frames = save_dummy_png(self.filepath, self.filenames, shape)
self.filename = os.path.join(self.filepath, '*.png')
self.frame0 = frames[0]
self.frame1 = frames[1]
self.kwargs = dict(plugin='matplotlib')
self.klass = pims.ImageSequence
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = shape
self.expected_len = 5
def tearDown(self):
clean_dummy_png(self.filepath, self.filenames)
class TestImageSequenceAcceptsList(_image_series, _deprecated_functions,
unittest.TestCase):
def setUp(self):
_skip_if_no_imread()
self.filepath = os.path.join(path, 'image_sequence')
self.filenames = ['T76S3F00001.png', 'T76S3F00002.png',
'T76S3F00003.png', 'T76S3F00004.png',
'T76S3F00005.png']
shape = (10, 11)
frames = save_dummy_png(self.filepath, self.filenames, shape)
self.filename = [os.path.join(self.filepath, fn)
for fn in self.filenames]
self.frame0 = frames[0]
self.frame1 = frames[1]
self.kwargs = dict(plugin='matplotlib')
self.klass = pims.ImageSequence
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = shape
self.expected_len = len(self.filenames)
def tearDown(self):
clean_dummy_png(self.filepath, self.filenames)
class TestImageSequenceNaturalSorting(_image_series, _deprecated_functions,
unittest.TestCase):
def setUp(self):
_skip_if_no_imread()
self.filepath = os.path.join(path, 'image_sequence')
self.filenames = ['T76S3F1.png', 'T76S3F20.png',
'T76S3F3.png', 'T76S3F4.png',
'T76S3F50.png', 'T76S3F10.png']
shape = (10, 11)
frames = save_dummy_png(self.filepath, self.filenames, shape)
self.filename = [os.path.join(self.filepath, fn)
for fn in self.filenames]
self.frame0 = frames[0]
self.frame1 = frames[2]
self.kwargs = dict(plugin='matplotlib')
self.klass = pims.ImageSequence
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = shape
self.expected_len = len(self.filenames)
sorted_files = ['T76S3F1.png',
'T76S3F3.png',
'T76S3F4.png',
'T76S3F10.png',
'T76S3F20.png',
'T76S3F50.png']
assert sorted_files == [x.split(os.path.sep)[-1] for x in self.v._filepaths]
def tearDown(self):
clean_dummy_png(self.filepath, self.filenames)
class TestTiffStack_pil(_tiff_image_series, _deprecated_functions,
unittest.TestCase):
def check_skip(self):
pass
def setUp(self):
_skip_if_no_PIL()
self.filename = os.path.join(path, 'stuck.tif')
self.frame0 = np.load(os.path.join(path, 'stuck_frame0.npy'))
self.frame1 = np.load(os.path.join(path, 'stuck_frame1.npy'))
self.klass = pims.TiffStack_pil
self.kwargs = dict()
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = (512, 512)
self.expected_len = 5
class TestTiffStack_tifffile(_tiff_image_series, _deprecated_functions,
unittest.TestCase):
def check_skip(self):
pass
def setUp(self):
_skip_if_no_tifffile()
self.filename = os.path.join(path, 'stuck.tif')
self.frame0 = np.load(os.path.join(path, 'stuck_frame0.npy'))
self.frame1 = np.load(os.path.join(path, 'stuck_frame1.npy'))
self.klass = pims.TiffStack_tifffile
self.kwargs = dict()
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = (512, 512)
self.expected_len = 5
class TestSpeStack(_image_series, _deprecated_functions,
unittest.TestCase):
def check_skip(self):
pass
def setUp(self):
self.filename = os.path.join(path, 'spestack_test.spe')
self.frame0 = np.load(os.path.join(path, 'spestack_test_frame0.npy'))
self.frame1 = np.load(os.path.join(path, 'spestack_test_frame1.npy'))
self.klass = pims.SpeStack
self.kwargs = dict()
self.v = self.klass(self.filename, **self.kwargs)
self.expected_shape = (128, 128)
self.expected_len = 5
def test_metadata(self):
m = self.v.metadata
with open(os.path.join(path, 'spestack_test_metadata.pkl'), 'rb') as p:
if sys.version_info.major < 3:
d = pickle.load(p)
else:
d = pickle.load(p, encoding="latin1")
#spare4 is actually a byte array
d["spare4"] = d["spare4"].encode("latin1")
assert_equal(m, d)
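    # Hedged note: encoding="latin1" is the usual way to load Python 2
    # pickles that contain 8-bit strings under Python 3, since latin-1 maps
    # bytes 0-255 one-to-one; that is also why "spare4" can be encoded back
    # to bytes losslessly above.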
class TestOpenFiles(unittest.TestCase):
def setUp(self):
_skip_if_no_PIL()
def test_open_pngs(self):
self.filepath = os.path.join(path, 'image_sequence')
self.filenames = ['T76S3F00001.png', 'T76S3F00002.png',
'T76S3F00003.png', 'T76S3F00004.png',
'T76S3F00005.png']
shape = (10, 11)
save_dummy_png(self.filepath, self.filenames, shape)
pims.open(os.path.join(path, 'image_sequence', '*.png'))
clean_dummy_png(self.filepath, self.filenames)
def test_open_mov(self):
_skip_if_no_PyAV()
pims.open(os.path.join(path, 'bulk-water.mov'))
def test_open_tiff(self):
_skip_if_no_tifffile()
pims.open(os.path.join(path, 'stuck.tif'))
class ImageSequenceND(_image_series, _deprecated_functions, unittest.TestCase):
def setUp(self):
_skip_if_no_imread()
self.filepath = os.path.join(path, 'image_sequence3d')
self.filenames = ['file_t001_z001_c1.png',
'file_t001_z001_c2.png',
'file_t001_z002_c1.png',
'file_t001_z002_c2.png',
'file_t002_z001_c1.png',
'file_t002_z001_c2.png',
'file_t002_z002_c1.png',
'file_t002_z002_c2.png',
'file_t003_z001_c1.png',
'file_t003_z001_c2.png',
'file_t003_z002_c1.png',
'file_t003_z002_c2.png']
shape = (10, 11)
frames = save_dummy_png(self.filepath, self.filenames, shape)
self.filename = os.path.join(self.filepath, '*.png')
self.frame0 = np.array([frames[0], frames[2]])
self.frame1 = np.array([frames[4], frames[6]])
self.klass = pims.ImageSequenceND
self.kwargs = dict(axes_identifiers='tzc')
self.v = self.klass(self.filename, **self.kwargs)
self.v.default_coords['c'] = 0
self.expected_len = 3
self.expected_Z = 2
self.expected_C = 2
self.expected_shape = (self.expected_Z,) + shape
def tearDown(self):
clean_dummy_png(self.filepath, self.filenames)
def test_filename_tzc(self):
tzc = pims.image_sequence.filename_to_indices('file_t01_z005_c4.png')
self.assertEqual(tzc, [1, 5, 4])
tzc = pims.image_sequence.filename_to_indices('t01file_t01_z005_c4.png')
self.assertEqual(tzc, [1, 5, 4])
tzc = pims.image_sequence.filename_to_indices('file_z005_c4_t01.png')
self.assertEqual(tzc, [1, 5, 4])
tzc = pims.image_sequence.filename_to_indices(u'file\u03BC_z05_c4_t01.png')
self.assertEqual(tzc, [1, 5, 4])
tzc = pims.image_sequence.filename_to_indices('file_t9415_z005.png')
self.assertEqual(tzc, [9415, 5, 0])
tzc = pims.image_sequence.filename_to_indices('file_t47_c34.png')
self.assertEqual(tzc, [47, 0, 34])
tzc = pims.image_sequence.filename_to_indices('file_z4_c2.png')
self.assertEqual(tzc, [0, 4, 2])
tzc = pims.image_sequence.filename_to_indices('file_p4_c2_q5_r1.png',
['p', 'q', 'r'])
self.assertEqual(tzc, [4, 5, 1])
def test_sizeZ(self):
self.check_skip()
assert_equal(self.v.sizes['z'], self.expected_Z)
def test_sizeC(self):
self.check_skip()
assert_equal(self.v.sizes['c'], self.expected_C)
class ImageSequenceND_RGB(_image_series, _deprecated_functions,
unittest.TestCase):
def setUp(self):
_skip_if_no_imread()
self.filepath = os.path.join(path, 'image_sequence3d')
self.filenames = ['file_t001_z001_c1.png',
'file_t001_z002_c1.png',
'file_t002_z001_c1.png',
'file_t002_z002_c1.png',
'file_t003_z001_c1.png',
'file_t003_z002_c1.png']
shape = (10, 11, 3)
frames = save_dummy_png(self.filepath, self.filenames, shape)
self.filename = os.path.join(self.filepath, '*.png')
self.frame0 = np.array([frames[0][:, :, 0], frames[1][:, :, 0]])
self.frame1 = np.array([frames[2][:, :, 0], frames[3][:, :, 0]])
self.klass = pims.ImageSequenceND
self.kwargs = dict(axes_identifiers='tz')
self.v = self.klass(self.filename, **self.kwargs)
self.v.default_coords['c'] = 0
self.expected_len = 3
self.expected_Z = 2
self.expected_C = 3
self.expected_shape = (2, 10, 11)
def test_sizeZ(self):
self.check_skip()
assert_equal(self.v.sizes['z'], self.expected_Z)
def test_sizeC(self):
self.check_skip()
assert_equal(self.v.sizes['c'], self.expected_C)
def tearDown(self):
clean_dummy_png(self.filepath, self.filenames)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
geo-poland/frappe | refs/heads/develop | frappe/website/doctype/user_vote/__init__.py | 12133432 | |
rahushen/ansible | refs/heads/devel | lib/ansible/module_utils/network/nso/__init__.py | 12133432 | |
filias/django | refs/heads/master | django/conf/locale/en/__init__.py | 12133432 | |
arun6582/django | refs/heads/master | tests/test_client_regress/__init__.py | 12133432 | |
LeoTestard/qt-ubuntu-components | refs/heads/master | tests/autopilot/tavastia/tests/progressbar/__init__.py | 12133432 | |
dkdewitt/werkzeug | refs/heads/master | tests/contrib/test_wrappers.py | 25 |
# -*- coding: utf-8 -*-
"""
tests.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~
    Tests for the contrib wrappers.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
from werkzeug.contrib import wrappers
from werkzeug import routing
from werkzeug.wrappers import Request, Response
def test_reverse_slash_behavior():
class MyRequest(wrappers.ReverseSlashBehaviorRequestMixin, Request):
pass
req = MyRequest.from_values('/foo/bar', 'http://example.com/test')
assert req.url == 'http://example.com/test/foo/bar'
assert req.path == 'foo/bar'
assert req.script_root == '/test/'
# make sure the routing system works with the slashes in
# reverse order as well.
map = routing.Map([routing.Rule('/foo/bar', endpoint='foo')])
adapter = map.bind_to_environ(req.environ)
assert adapter.match() == ('foo', {})
adapter = map.bind(req.host, req.script_root)
assert adapter.match(req.path) == ('foo', {})
def test_dynamic_charset_request_mixin():
class MyRequest(wrappers.DynamicCharsetRequestMixin, Request):
pass
env = {'CONTENT_TYPE': 'text/html'}
req = MyRequest(env)
assert req.charset == 'latin1'
env = {'CONTENT_TYPE': 'text/html; charset=utf-8'}
req = MyRequest(env)
assert req.charset == 'utf-8'
env = {'CONTENT_TYPE': 'application/octet-stream'}
req = MyRequest(env)
assert req.charset == 'latin1'
assert req.url_charset == 'latin1'
MyRequest.url_charset = 'utf-8'
env = {'CONTENT_TYPE': 'application/octet-stream'}
req = MyRequest(env)
assert req.charset == 'latin1'
assert req.url_charset == 'utf-8'
def return_ascii(x):
return "ascii"
env = {'CONTENT_TYPE': 'text/plain; charset=x-weird-charset'}
req = MyRequest(env)
req.unknown_charset = return_ascii
assert req.charset == 'ascii'
assert req.url_charset == 'utf-8'
def test_dynamic_charset_response_mixin():
class MyResponse(wrappers.DynamicCharsetResponseMixin, Response):
default_charset = 'utf-7'
resp = MyResponse(mimetype='text/html')
assert resp.charset == 'utf-7'
resp.charset = 'utf-8'
assert resp.charset == 'utf-8'
assert resp.mimetype == 'text/html'
assert resp.mimetype_params == {'charset': 'utf-8'}
resp.mimetype_params['charset'] = 'iso-8859-15'
assert resp.charset == 'iso-8859-15'
resp.set_data(u'Hällo Wörld')
assert b''.join(resp.iter_encoded()) == \
u'Hällo Wörld'.encode('iso-8859-15')
del resp.headers['content-type']
try:
resp.charset = 'utf-8'
except TypeError:
pass
else:
assert False, 'expected type error on charset setting without ct'
|
dimagi/rapidsms-contrib-apps-dev | refs/heads/master | echo/handlers/echo.py | 12 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from rapidsms.contrib.handlers.handlers.keyword import KeywordHandler
class EchoHandler(KeywordHandler):
"""
Handle any message prefixed ``echo``, responding with the remainder
of the text. Useful for remotely testing internationalization.
"""
keyword = "echo"
def help(self):
self.respond("To echo some text, send: ECHO <ANYTHING>")
def handle(self, text):
self.respond(text)
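    # Hedged usage sketch: with this handler registered, an inbound message
    # "echo hola" matches the keyword and handle() receives "hola", so the
    # reply is "hola"; a bare "echo" triggers help() instead.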
|
mick-d/nipype_source | refs/heads/master | nipype/workflows/dmri/camino/setup.py | 28 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('camino', parent_package, top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
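# Hedged note on the pattern above, assuming the standard numpy.distutils
# layout: each sub-package exposes configuration(), and a parent setup.py
# aggregates it with config.add_subpackage('camino'), so this file only
# declares its own Configuration.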
|
bonethrown/azure-sdk-for-python | refs/heads/master | azure-servicebus/tests/test_servicebus_servicebus.py | 5 |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import random
import sys
import time
import unittest
from datetime import datetime
from azure.common import (
AzureHttpError,
AzureMissingResourceHttpError,
AzureConflictHttpError,
)
from azure.servicebus._http import HTTPError
from azure.servicebus import (
AZURE_SERVICEBUS_NAMESPACE,
AZURE_SERVICEBUS_ACCESS_KEY,
AZURE_SERVICEBUS_ISSUER,
AzureServiceBusPeekLockError,
AzureServiceBusResourceNotFound,
Message,
Queue,
Rule,
ServiceBusService,
Subscription,
Topic,
)
from testutils.common_recordingtestcase import (
TestMode,
record,
)
from tests.servicebus_testcase import ServiceBusTestCase
#------------------------------------------------------------------------------
class ServiceBusServiceBusTest(ServiceBusTestCase):
def setUp(self):
super(ServiceBusServiceBusTest, self).setUp()
if self.settings.SERVICEBUS_AUTH_TYPE.lower() == 'sas':
self.sbs = ServiceBusService(
self.settings.SERVICEBUS_NAME,
shared_access_key_name=self.settings.SERVICEBUS_SAS_KEY_NAME,
shared_access_key_value=self.settings.SERVICEBUS_SAS_KEY_VALUE,
)
else:
self.sbs = ServiceBusService(
self.settings.SERVICEBUS_NAME,
account_key=self.settings.SERVICEBUS_ACS_KEY,
issuer='owner',
)
self._set_service_options(self.sbs, self.settings)
self.queue_name = self.get_resource_name('utqueue')
self.topic_name = self.get_resource_name('uttopic')
self.additional_queue_names = []
self.additional_topic_names = []
def tearDown(self):
if not self.is_playback():
try:
self.sbs.delete_queue(self.queue_name)
except:
pass
for name in self.additional_queue_names:
try:
self.sbs.delete_queue(name)
except:
pass
try:
self.sbs.delete_topic(self.topic_name)
except:
pass
for name in self.additional_topic_names:
try:
self.sbs.delete_topic(name)
except:
pass
return super(ServiceBusServiceBusTest, self).tearDown()
#--Helpers-----------------------------------------------------------------
def _create_queue(self, queue_name):
self.sbs.create_queue(queue_name, None, True)
def _create_queue_and_send_msg(self, queue_name, msg):
self._create_queue(queue_name)
self.sbs.send_queue_message(queue_name, msg)
def _create_topic(self, topic_name):
self.sbs.create_topic(topic_name, None, True)
def _create_topic_and_subscription(self, topic_name, subscription_name):
self._create_topic(topic_name)
self._create_subscription(topic_name, subscription_name)
def _create_subscription(self, topic_name, subscription_name):
self.sbs.create_subscription(topic_name, subscription_name, None, True)
#--Test cases for service bus service -------------------------------------
def test_create_service_bus_missing_arguments(self):
# Arrange
if AZURE_SERVICEBUS_NAMESPACE in os.environ:
del os.environ[AZURE_SERVICEBUS_NAMESPACE]
if AZURE_SERVICEBUS_ACCESS_KEY in os.environ:
del os.environ[AZURE_SERVICEBUS_ACCESS_KEY]
if AZURE_SERVICEBUS_ISSUER in os.environ:
del os.environ[AZURE_SERVICEBUS_ISSUER]
# Act
with self.assertRaises(ValueError):
sbs = ServiceBusService()
# Assert
def test_create_service_bus_env_variables(self):
# Arrange
os.environ[AZURE_SERVICEBUS_NAMESPACE] = self.settings.SERVICEBUS_NAME
os.environ[AZURE_SERVICEBUS_ACCESS_KEY] = self.settings.SERVICEBUS_ACS_KEY
os.environ[AZURE_SERVICEBUS_ISSUER] = 'owner'
# Act
sbs = ServiceBusService()
if AZURE_SERVICEBUS_NAMESPACE in os.environ:
del os.environ[AZURE_SERVICEBUS_NAMESPACE]
if AZURE_SERVICEBUS_ACCESS_KEY in os.environ:
del os.environ[AZURE_SERVICEBUS_ACCESS_KEY]
if AZURE_SERVICEBUS_ISSUER in os.environ:
del os.environ[AZURE_SERVICEBUS_ISSUER]
# Assert
self.assertIsNotNone(sbs)
self.assertEqual(sbs.service_namespace, self.settings.SERVICEBUS_NAME)
self.assertEqual(sbs.account_key, self.settings.SERVICEBUS_ACS_KEY)
self.assertEqual(sbs.issuer, 'owner')
#--Test cases for queues --------------------------------------------------
@record
def test_create_queue_no_options(self):
# Arrange
# Act
created = self.sbs.create_queue(self.queue_name)
# Assert
self.assertTrue(created)
@record
def test_create_queue_no_options_fail_on_exist(self):
# Arrange
# Act
created = self.sbs.create_queue(self.queue_name, None, True)
# Assert
self.assertTrue(created)
@record
def test_create_queue_with_options(self):
# Arrange
# Act
queue_options = Queue()
queue_options.default_message_time_to_live = 'PT1M'
queue_options.duplicate_detection_history_time_window = 'PT5M'
queue_options.enable_batched_operations = False
queue_options.dead_lettering_on_message_expiration = False
queue_options.lock_duration = 'PT1M'
queue_options.max_delivery_count = 15
queue_options.max_size_in_megabytes = 5120
queue_options.message_count = 0
queue_options.requires_duplicate_detection = False
queue_options.requires_session = False
queue_options.size_in_bytes = 0
created = self.sbs.create_queue(self.queue_name, queue_options)
# Assert
self.assertTrue(created)
queue = self.sbs.get_queue(self.queue_name)
self.assertEqual('PT1M', queue.default_message_time_to_live)
self.assertEqual('PT5M', queue.duplicate_detection_history_time_window)
self.assertEqual(False, queue.enable_batched_operations)
self.assertEqual(False, queue.dead_lettering_on_message_expiration)
self.assertEqual('PT1M', queue.lock_duration)
self.assertEqual(15, queue.max_delivery_count)
self.assertEqual(5120, queue.max_size_in_megabytes)
self.assertEqual(0, queue.message_count)
self.assertEqual(False, queue.requires_duplicate_detection)
self.assertEqual(False, queue.requires_session)
self.assertEqual(0, queue.size_in_bytes)
@record
def test_create_queue_with_already_existing_queue(self):
# Arrange
# Act
created1 = self.sbs.create_queue(self.queue_name)
created2 = self.sbs.create_queue(self.queue_name)
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
@record
def test_create_queue_with_already_existing_queue_fail_on_exist(self):
# Arrange
# Act
created = self.sbs.create_queue(self.queue_name)
with self.assertRaises(AzureConflictHttpError):
self.sbs.create_queue(self.queue_name, None, True)
# Assert
self.assertTrue(created)
@record
def test_get_queue_with_existing_queue(self):
# Arrange
self._create_queue(self.queue_name)
# Act
queue = self.sbs.get_queue(self.queue_name)
# Assert
self.assertIsNotNone(queue)
self.assertEqual(queue.name, self.queue_name)
@record
def test_get_queue_with_non_existing_queue(self):
# Arrange
# Act
with self.assertRaises(AzureServiceBusResourceNotFound):
resp = self.sbs.get_queue(self.queue_name)
# Assert
@record
def test_list_queues(self):
# Arrange
self._create_queue(self.queue_name)
# Act
queues = self.sbs.list_queues()
for queue in queues:
name = queue.name
# Assert
self.assertIsNotNone(queues)
self.assertNamedItemInContainer(queues, self.queue_name)
@record
def test_list_queues_with_special_chars(self):
# Arrange
# Name must start and end with an alphanumeric and can only contain
# letters, numbers, periods, hyphens, forward slashes and underscores.
other_queue_name = self.queue_name + 'txt/.-_123'
self.additional_queue_names = [other_queue_name]
self._create_queue(other_queue_name)
# Act
queues = self.sbs.list_queues()
# Assert
self.assertIsNotNone(queues)
self.assertNamedItemInContainer(queues, other_queue_name)
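    # Hedged sketch of the naming rule quoted above as a regex check
    # (illustrative only, not an SDK API; assumes names of length >= 2):
    #
    #     import re
    #     assert re.match(r'^[A-Za-z0-9][\w./-]*[A-Za-z0-9]$', other_queue_name)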
@record
def test_delete_queue_with_existing_queue(self):
# Arrange
self._create_queue(self.queue_name)
# Act
deleted = self.sbs.delete_queue(self.queue_name)
# Assert
self.assertTrue(deleted)
queues = self.sbs.list_queues()
self.assertNamedItemNotInContainer(queues, self.queue_name)
@record
def test_delete_queue_with_existing_queue_fail_not_exist(self):
# Arrange
self._create_queue(self.queue_name)
# Act
deleted = self.sbs.delete_queue(self.queue_name, True)
# Assert
self.assertTrue(deleted)
queues = self.sbs.list_queues()
self.assertNamedItemNotInContainer(queues, self.queue_name)
@record
def test_delete_queue_with_non_existing_queue(self):
# Arrange
# Act
deleted = self.sbs.delete_queue(self.queue_name)
# Assert
self.assertFalse(deleted)
@record
def test_delete_queue_with_non_existing_queue_fail_not_exist(self):
# Arrange
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.sbs.delete_queue(self.queue_name, True)
# Assert
@record
def test_send_queue_message(self):
# Arrange
self._create_queue(self.queue_name)
sent_msg = Message(b'send message')
# Act
self.sbs.send_queue_message(self.queue_name, sent_msg)
# Assert
@record
def test_receive_queue_message_read_delete_mode(self):
        # Arrange
sent_msg = Message(b'receive message')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_receive_queue_message_with_broker_properties(self):
        # Arrange
sent_msg = Message(b'receive message')
sent_msg.broker_properties = \
'{"ForcePersistence": false, "Label": "My label" }'
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
self.assertEqual("My label", received_msg.broker_properties['Label'])
self.assertEqual(False, received_msg.broker_properties['ForcePersistence'])
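    # Hedged alternative to the hand-written JSON string above: building
    # broker properties with json.dumps avoids quoting mistakes:
    #
    #     import json
    #     sent_msg.broker_properties = json.dumps(
    #         {'ForcePersistence': False, 'Label': 'My label'})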
@record
def test_receive_queue_message_read_delete_mode_throws_on_delete(self):
        # Arrange
sent_msg = Message(b'receive message')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
with self.assertRaises(AzureServiceBusPeekLockError):
received_msg.delete()
# Assert
@record
def test_receive_queue_message_read_delete_mode_throws_on_unlock(self):
        # Arrange
sent_msg = Message(b'receive message')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
with self.assertRaises(AzureServiceBusPeekLockError):
received_msg.unlock()
# Assert
@record
def test_receive_queue_message_peek_lock_mode(self):
# Arrange
sent_msg = Message(b'peek lock message')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, True)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_receive_queue_message_delete(self):
# Arrange
sent_msg = Message(b'peek lock message delete')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, True)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_receive_queue_message_delete_with_slash(self):
# Arrange
self.queue_name = self.get_resource_name('ut/queue')
sent_msg = Message(b'peek lock message delete')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, True)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_receive_queue_message_unlock(self):
# Arrange
sent_msg = Message(b'peek lock message unlock')
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, True)
received_msg.unlock()
# Assert
received_again_msg = self.sbs.receive_queue_message(
self.queue_name, True)
received_again_msg.delete()
self.assertIsNotNone(received_msg)
self.assertIsNotNone(received_again_msg)
self.assertEqual(sent_msg.body, received_msg.body)
self.assertEqual(received_again_msg.body, received_msg.body)
@record
def test_send_queue_message_with_custom_message_type(self):
# Arrange
self._create_queue(self.queue_name)
# Act
sent_msg = Message(
b'<text>peek lock message custom message type</text>',
type='text/xml')
self.sbs.send_queue_message(self.queue_name, sent_msg)
received_msg = self.sbs.receive_queue_message(self.queue_name, True, 5)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual('text/xml', received_msg.type)
@record
def test_send_queue_message_with_custom_message_properties(self):
# Arrange
self._create_queue(self.queue_name)
# Act
props = {'hello': 'world',
'number': 42,
'active': True,
'deceased': False,
'large': 8555111000,
'floating': 3.14,
'dob': datetime(2011, 12, 14)}
sent_msg = Message(b'message with properties', custom_properties=props)
self.sbs.send_queue_message(self.queue_name, sent_msg)
received_msg = self.sbs.receive_queue_message(self.queue_name, True, 5)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(received_msg.custom_properties['hello'], 'world')
self.assertEqual(received_msg.custom_properties['number'], 42)
self.assertEqual(received_msg.custom_properties['active'], True)
self.assertEqual(received_msg.custom_properties['deceased'], False)
self.assertEqual(received_msg.custom_properties['large'], 8555111000)
self.assertEqual(received_msg.custom_properties['floating'], 3.14)
self.assertEqual(
received_msg.custom_properties['dob'], datetime(2011, 12, 14))
@unittest.skip('flaky')
def test_receive_queue_message_timeout_5(self):
# Arrange
self._create_queue(self.queue_name)
# Act
start = datetime.now()
received_msg = self.sbs.receive_queue_message(self.queue_name, True, 5)
duration = datetime.now() - start
# Assert
self.assertGreater(duration.total_seconds(), 3)
self.assertLess(duration.total_seconds(), 10)
self.assertIsNotNone(received_msg)
self.assertIsNone(received_msg.body)
@unittest.skip('flaky')
def test_receive_queue_message_timeout_50(self):
# Arrange
self._create_queue(self.queue_name)
# Act
start = datetime.now()
received_msg = self.sbs.receive_queue_message(
self.queue_name, True, 50)
duration = datetime.now() - start
# Assert
self.assertGreater(duration.total_seconds(), 48)
self.assertLess(duration.total_seconds(), 55)
self.assertIsNotNone(received_msg)
self.assertIsNone(received_msg.body)
@unittest.skip('flaky')
def test_receive_queue_message_timeout_50_http_timeout(self):
# Arrange
self._create_queue(self.queue_name)
# Act
self.sbs.timeout = 10
try:
received_msg = self.sbs.receive_queue_message(
self.queue_name, True, 50)
self.assertTrue(False, 'Failed to trigger an http timeout')
except:
pass
# Assert
#--Test cases for topics/subscriptions ------------------------------------
@record
def test_create_topic_no_options(self):
# Arrange
# Act
created = self.sbs.create_topic(self.topic_name)
# Assert
self.assertTrue(created)
@record
def test_create_topic_no_options_fail_on_exist(self):
# Arrange
# Act
created = self.sbs.create_topic(self.topic_name, None, True)
# Assert
self.assertTrue(created)
@record
def test_create_topic_with_options(self):
# Arrange
# Act
topic_options = Topic()
topic_options.default_message_time_to_live = 'PT1M'
topic_options.duplicate_detection_history_time_window = 'PT5M'
topic_options.enable_batched_operations = False
topic_options.max_size_in_megabytes = 5120
topic_options.requires_duplicate_detection = False
topic_options.size_in_bytes = 0
# TODO: MaximumNumberOfSubscriptions is not supported?
created = self.sbs.create_topic(self.topic_name, topic_options)
# Assert
self.assertTrue(created)
topic = self.sbs.get_topic(self.topic_name)
self.assertEqual('PT1M', topic.default_message_time_to_live)
self.assertEqual('PT5M', topic.duplicate_detection_history_time_window)
self.assertEqual(False, topic.enable_batched_operations)
self.assertEqual(5120, topic.max_size_in_megabytes)
self.assertEqual(False, topic.requires_duplicate_detection)
self.assertEqual(0, topic.size_in_bytes)
@record
def test_create_topic_with_already_existing_topic(self):
# Arrange
# Act
created1 = self.sbs.create_topic(self.topic_name)
created2 = self.sbs.create_topic(self.topic_name)
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
@record
def test_create_topic_with_already_existing_topic_fail_on_exist(self):
# Arrange
# Act
created = self.sbs.create_topic(self.topic_name)
with self.assertRaises(AzureConflictHttpError):
self.sbs.create_topic(self.topic_name, None, True)
# Assert
self.assertTrue(created)
@record
@unittest.skip('undesirable output, this is old enough, backwards compatibility can be deleted')
def test_topic_backwards_compatibility_warning(self):
# Arrange
topic_options = Topic()
topic_options.max_size_in_megabytes = 5120
# Act
val = topic_options.max_size_in_mega_bytes
# Assert
self.assertEqual(val, 5120)
# Act
topic_options.max_size_in_mega_bytes = 1024
# Assert
self.assertEqual(topic_options.max_size_in_megabytes, 1024)
@record
def test_get_topic_with_existing_topic(self):
# Arrange
self._create_topic(self.topic_name)
# Act
topic = self.sbs.get_topic(self.topic_name)
# Assert
self.assertIsNotNone(topic)
self.assertEqual(topic.name, self.topic_name)
@record
def test_get_topic_with_non_existing_topic(self):
# Arrange
# Act
with self.assertRaises(AzureServiceBusResourceNotFound):
self.sbs.get_topic(self.topic_name)
# Assert
@record
def test_list_topics(self):
# Arrange
self._create_topic(self.topic_name)
# Act
topics = self.sbs.list_topics()
for topic in topics:
name = topic.name
# Assert
self.assertIsNotNone(topics)
self.assertNamedItemInContainer(topics, self.topic_name)
@record
def test_list_topics_with_special_chars(self):
# Arrange
# Name must start and end with an alphanumeric and can only contain
# letters, numbers, periods, hyphens, forward slashes and underscores.
other_topic_name = self.topic_name + 'txt/.-_123'
self.additional_topic_names = [other_topic_name]
self._create_topic(other_topic_name)
# Act
topics = self.sbs.list_topics()
# Assert
self.assertIsNotNone(topics)
self.assertNamedItemInContainer(topics, other_topic_name)
@record
def test_delete_topic_with_existing_topic(self):
# Arrange
self._create_topic(self.topic_name)
# Act
deleted = self.sbs.delete_topic(self.topic_name)
# Assert
self.assertTrue(deleted)
topics = self.sbs.list_topics()
self.assertNamedItemNotInContainer(topics, self.topic_name)
@record
def test_delete_topic_with_existing_topic_fail_not_exist(self):
# Arrange
self._create_topic(self.topic_name)
# Act
deleted = self.sbs.delete_topic(self.topic_name, True)
# Assert
self.assertTrue(deleted)
topics = self.sbs.list_topics()
self.assertNamedItemNotInContainer(topics, self.topic_name)
@record
def test_delete_topic_with_non_existing_topic(self):
# Arrange
# Act
deleted = self.sbs.delete_topic(self.topic_name)
# Assert
self.assertFalse(deleted)
@record
def test_delete_topic_with_non_existing_topic_fail_not_exist(self):
# Arrange
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.sbs.delete_topic(self.topic_name, True)
# Assert
@record
def test_create_subscription(self):
# Arrange
self._create_topic(self.topic_name)
# Act
created = self.sbs.create_subscription(
self.topic_name, 'MySubscription')
# Assert
self.assertTrue(created)
@record
def test_create_subscription_with_options(self):
# Arrange
self._create_topic(self.topic_name)
# Act
subscription_options = Subscription()
subscription_options.dead_lettering_on_filter_evaluation_exceptions = False
subscription_options.dead_lettering_on_message_expiration = False
subscription_options.default_message_time_to_live = 'PT15M'
subscription_options.enable_batched_operations = False
subscription_options.lock_duration = 'PT1M'
subscription_options.max_delivery_count = 15
#message_count is read-only
subscription_options.message_count = 0
subscription_options.requires_session = False
created = self.sbs.create_subscription(
self.topic_name, 'MySubscription', subscription_options)
# Assert
self.assertTrue(created)
subscription = self.sbs.get_subscription(
self.topic_name, 'MySubscription')
self.assertEqual(
False, subscription.dead_lettering_on_filter_evaluation_exceptions)
self.assertEqual(
False, subscription.dead_lettering_on_message_expiration)
self.assertEqual('PT15M', subscription.default_message_time_to_live)
self.assertEqual(False, subscription.enable_batched_operations)
self.assertEqual('PT1M', subscription.lock_duration)
# self.assertEqual(15, subscription.max_delivery_count) #no idea why
# max_delivery_count is always 10
self.assertEqual(0, subscription.message_count)
self.assertEqual(False, subscription.requires_session)
@record
def test_create_subscription_fail_on_exist(self):
# Arrange
self._create_topic(self.topic_name)
# Act
created = self.sbs.create_subscription(
self.topic_name, 'MySubscription', None, True)
# Assert
self.assertTrue(created)
@record
def test_create_subscription_with_already_existing_subscription(self):
# Arrange
self._create_topic(self.topic_name)
# Act
created1 = self.sbs.create_subscription(
self.topic_name, 'MySubscription')
created2 = self.sbs.create_subscription(
self.topic_name, 'MySubscription')
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
@record
def test_create_subscription_with_already_existing_subscription_fail_on_exist(self):
# Arrange
self._create_topic(self.topic_name)
# Act
created = self.sbs.create_subscription(
self.topic_name, 'MySubscription')
with self.assertRaises(AzureConflictHttpError):
self.sbs.create_subscription(
self.topic_name, 'MySubscription', None, True)
# Assert
self.assertTrue(created)
@record
def test_list_subscriptions(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription2')
# Act
subscriptions = self.sbs.list_subscriptions(self.topic_name)
# Assert
self.assertIsNotNone(subscriptions)
self.assertEqual(len(subscriptions), 1)
self.assertEqual(subscriptions[0].name, 'MySubscription2')
@record
def test_get_subscription_with_existing_subscription(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription3')
# Act
subscription = self.sbs.get_subscription(
self.topic_name, 'MySubscription3')
# Assert
self.assertIsNotNone(subscription)
self.assertEqual(subscription.name, 'MySubscription3')
@record
def test_get_subscription_with_non_existing_subscription(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription3')
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.sbs.get_subscription(self.topic_name, 'MySubscription4')
# Assert
@record
def test_delete_subscription_with_existing_subscription(self):
# Arrange
self._create_topic(self.topic_name)
self._create_subscription(self.topic_name, 'MySubscription4')
self._create_subscription(self.topic_name, 'MySubscription5')
# Act
deleted = self.sbs.delete_subscription(
self.topic_name, 'MySubscription4')
# Assert
self.assertTrue(deleted)
subscriptions = self.sbs.list_subscriptions(self.topic_name)
self.assertIsNotNone(subscriptions)
self.assertEqual(len(subscriptions), 1)
self.assertEqual(subscriptions[0].name, 'MySubscription5')
@record
def test_delete_subscription_with_existing_subscription_fail_not_exist(self):
# Arrange
self._create_topic(self.topic_name)
self._create_subscription(self.topic_name, 'MySubscription4')
self._create_subscription(self.topic_name, 'MySubscription5')
# Act
deleted = self.sbs.delete_subscription(
self.topic_name, 'MySubscription4', True)
# Assert
self.assertTrue(deleted)
subscriptions = self.sbs.list_subscriptions(self.topic_name)
self.assertIsNotNone(subscriptions)
self.assertEqual(len(subscriptions), 1)
self.assertEqual(subscriptions[0].name, 'MySubscription5')
@record
def test_delete_subscription_with_non_existing_subscription(self):
# Arrange
self._create_topic(self.topic_name)
# Act
deleted = self.sbs.delete_subscription(
self.topic_name, 'MySubscription')
# Assert
self.assertFalse(deleted)
@record
def test_delete_subscription_with_non_existing_subscription_fail_not_exist(self):
# Arrange
self._create_topic(self.topic_name)
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.sbs.delete_subscription(
self.topic_name, 'MySubscription', True)
# Assert
@record
def test_create_rule_no_options(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1')
# Assert
self.assertTrue(created)
@record
def test_create_rule_no_options_fail_on_exist(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', None, True)
# Assert
self.assertTrue(created)
@record
def test_create_rule_with_already_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
created1 = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1')
created2 = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1')
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
@record
def test_create_rule_with_already_existing_rule_fail_on_exist(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1')
with self.assertRaises(AzureConflictHttpError):
self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', None, True)
# Assert
self.assertTrue(created)
@record
def test_create_rule_with_options_sql_filter(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule1 = Rule()
rule1.filter_type = 'SqlFilter'
rule1.filter_expression = 'number > 40'
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', rule1)
# Assert
self.assertTrue(created)
@record
def test_create_rule_with_options_true_filter(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule1 = Rule()
rule1.filter_type = 'TrueFilter'
rule1.filter_expression = '1=1'
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', rule1)
# Assert
self.assertTrue(created)
@record
def test_create_rule_with_options_false_filter(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule1 = Rule()
rule1.filter_type = 'FalseFilter'
rule1.filter_expression = '1=0'
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', rule1)
# Assert
self.assertTrue(created)
@record
def test_create_rule_with_options_correlation_filter(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule1 = Rule()
rule1.filter_type = 'CorrelationFilter'
rule1.filter_expression = 'myid'
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', rule1)
# Assert
self.assertTrue(created)
@record
def test_create_rule_with_options_empty_rule_action(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule1 = Rule()
rule1.action_type = 'EmptyRuleAction'
rule1.action_expression = ''
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', rule1)
# Assert
self.assertTrue(created)
@record
def test_create_rule_with_options_sql_rule_action(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule1 = Rule()
rule1.action_type = 'SqlRuleAction'
rule1.action_expression = "SET number = 5"
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', rule1)
# Assert
self.assertTrue(created)
@record
def test_list_rules(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
resp = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule2')
# Act
rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
# Assert
self.assertEqual(len(rules), 2)
@record
def test_get_rule_with_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
rule = self.sbs.get_rule(self.topic_name, 'MySubscription', '$Default')
# Assert
self.assertIsNotNone(rule)
self.assertEqual(rule.name, '$Default')
@record
def test_get_rule_with_non_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.sbs.get_rule(self.topic_name,
'MySubscription', 'NonExistingRule')
# Assert
@record
def test_get_rule_with_existing_rule_with_options(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_rule = Rule()
sent_rule.filter_type = 'SqlFilter'
sent_rule.filter_expression = 'number > 40'
sent_rule.action_type = 'SqlRuleAction'
sent_rule.action_expression = 'SET number = 5'
self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule1', sent_rule)
# Act
received_rule = self.sbs.get_rule(
self.topic_name, 'MySubscription', 'MyRule1')
# Assert
self.assertIsNotNone(received_rule)
self.assertEqual(received_rule.name, 'MyRule1')
self.assertEqual(received_rule.filter_type, sent_rule.filter_type)
self.assertEqual(received_rule.filter_expression,
sent_rule.filter_expression)
self.assertEqual(received_rule.action_type, sent_rule.action_type)
self.assertEqual(received_rule.action_expression,
sent_rule.action_expression)
@record
def test_delete_rule_with_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
resp = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule3')
resp = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule4')
# Act
deleted1 = self.sbs.delete_rule(
self.topic_name, 'MySubscription', 'MyRule4')
deleted2 = self.sbs.delete_rule(
self.topic_name, 'MySubscription', '$Default')
# Assert
self.assertTrue(deleted1)
self.assertTrue(deleted2)
rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
self.assertIsNotNone(rules)
self.assertEqual(len(rules), 1)
self.assertEqual(rules[0].name, 'MyRule3')
@record
def test_delete_rule_with_existing_rule_fail_not_exist(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
resp = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule3')
resp = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule4')
# Act
deleted1 = self.sbs.delete_rule(
self.topic_name, 'MySubscription', 'MyRule4', True)
deleted2 = self.sbs.delete_rule(
self.topic_name, 'MySubscription', '$Default', True)
# Assert
self.assertTrue(deleted1)
self.assertTrue(deleted2)
rules = self.sbs.list_rules(self.topic_name, 'MySubscription')
self.assertIsNotNone(rules)
self.assertEqual(len(rules), 1)
self.assertEqual(rules[0].name, 'MyRule3')
@record
def test_delete_rule_with_non_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
deleted = self.sbs.delete_rule(
self.topic_name, 'MySubscription', 'NonExistingRule')
# Assert
self.assertFalse(deleted)
@record
def test_delete_rule_with_non_existing_rule_fail_not_exist(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.sbs.delete_rule(
self.topic_name, 'MySubscription', 'NonExistingRule', True)
# Assert
@record
def test_send_topic_message(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(b'subscription message')
# Act
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Assert
@record
def test_receive_subscription_message_read_delete_mode(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(b'subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_receive_subscription_message_read_delete_mode_throws_on_delete(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(b'subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', False)
with self.assertRaises(AzureServiceBusPeekLockError):
received_msg.delete()
# Assert
@record
def test_receive_subscription_message_read_delete_mode_throws_on_unlock(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(b'subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', False)
with self.assertRaises(AzureServiceBusPeekLockError):
received_msg.unlock()
# Assert
@record
def test_receive_subscription_message_peek_lock_mode(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(b'subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', True, 5)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_receive_subscription_message_delete(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(b'subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', True, 5)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_receive_subscription_message_delete_with_slash(self):
# Arrange
self.topic_name = self.get_resource_name('ut/topic')
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(b'subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', True, 5)
received_msg.delete()
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_receive_subscription_message_unlock(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(b'subscription message')
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', True)
received_msg.unlock()
# Assert
received_again_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', True)
received_again_msg.delete()
self.assertIsNotNone(received_msg)
self.assertIsNotNone(received_again_msg)
self.assertEqual(sent_msg.body, received_msg.body)
self.assertEqual(received_again_msg.body, received_msg.body)
@record
def test_with_filter(self):
# Single filter
called = []
def my_filter(request, next):
called.append(True)
return next(request)
sbs = self.sbs.with_filter(my_filter)
sbs.create_topic(self.topic_name + '0', None, True)
self.assertTrue(called)
del called[:]
sbs.delete_topic(self.topic_name + '0')
self.assertTrue(called)
del called[:]
# Chained filters
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
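        # Filters wrap like middleware: the filter added last ('b') runs
        # first and delegates inward, so each request records ['b', 'a'].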
sbs = self.sbs.with_filter(filter_a).with_filter(filter_b)
sbs.create_topic(self.topic_name + '0', None, True)
self.assertEqual(called, ['b', 'a'])
sbs.delete_topic(self.topic_name + '0')
self.assertEqual(called, ['b', 'a', 'b', 'a'])
@unittest.skip('requires extra setup')
def test_two_identities(self):
# In order to run this test, 2 service bus service identities are
# created using the sbaztool available at:
# http://code.msdn.microsoft.com/windowsazure/Authorization-SBAzTool-6fd76d93
#
# Use the following commands to create 2 identities and grant access
# rights.
# Replace <servicebusnamespace> with the namespace specified in the
# test .json file
# Replace <servicebuskey> with the key specified in the test .json file
# This only needs to be executed once, after the service bus namespace
# is created.
#
# sbaztool makeid user1 NoHEoD6snlvlhZm7yek9Etxca3l0CYjfc19ICIJZoUg= -n <servicebusnamespace> -k <servicebuskey>
# sbaztool grant Send /path1 user1 -n <servicebusnamespace> -k <servicebuskey>
# sbaztool grant Listen /path1 user1 -n <servicebusnamespace> -k <servicebuskey>
# sbaztool grant Manage /path1 user1 -n <servicebusnamespace> -k
# <servicebuskey>
# sbaztool makeid user2 Tb6K5qEgstyRBwp86JEjUezKj/a+fnkLFnibfgvxvdg= -n <servicebusnamespace> -k <servicebuskey>
# sbaztool grant Send /path2 user2 -n <servicebusnamespace> -k <servicebuskey>
# sbaztool grant Listen /path2 user2 -n <servicebusnamespace> -k <servicebuskey>
# sbaztool grant Manage /path2 user2 -n <servicebusnamespace> -k
# <servicebuskey>
sbs1 = ServiceBusService(self.settings.SERVICEBUS_NAME,
'NoHEoD6snlvlhZm7yek9Etxca3l0CYjfc19ICIJZoUg=',
'user1')
sbs2 = ServiceBusService(self.settings.SERVICEBUS_NAME,
'Tb6K5qEgstyRBwp86JEjUezKj/a+fnkLFnibfgvxvdg=',
'user2')
queue1_name = 'path1/queue' + str(random.randint(1, 10000000))
queue2_name = 'path2/queue' + str(random.randint(1, 10000000))
try:
# Create queues, success
sbs1.create_queue(queue1_name)
sbs2.create_queue(queue2_name)
# Receive messages, success
msg = sbs1.receive_queue_message(queue1_name, True, 1)
self.assertIsNone(msg.body)
msg = sbs1.receive_queue_message(queue1_name, True, 1)
self.assertIsNone(msg.body)
msg = sbs2.receive_queue_message(queue2_name, True, 1)
self.assertIsNone(msg.body)
msg = sbs2.receive_queue_message(queue2_name, True, 1)
self.assertIsNone(msg.body)
# Receive messages, failure
with self.assertRaises(AzureHttpError):
msg = sbs1.receive_queue_message(queue2_name, True, 1)
with self.assertRaises(AzureHttpError):
msg = sbs2.receive_queue_message(queue1_name, True, 1)
finally:
try:
sbs1.delete_queue(queue1_name)
except:
pass
try:
sbs2.delete_queue(queue2_name)
except:
pass
@record
def test_unicode_create_queue_unicode_name(self):
# Arrange
self.queue_name = self.queue_name + u'啊齄丂狛狜'
# Act
with self.assertRaises(AzureHttpError):
created = self.sbs.create_queue(self.queue_name)
# Assert
@record
def test_send_queue_message_unicode_python_27(self):
'''Test for auto-encoding of unicode text (backwards compatibility).'''
if sys.version_info >= (3,):
return
# Arrange
data = u'receive message啊齄丂狛狜'
sent_msg = Message(data)
self._create_queue(self.queue_name)
# Act
self.sbs.send_queue_message(self.queue_name, sent_msg)
# Assert
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
self.assertIsNotNone(received_msg)
self.assertEqual(received_msg.body, data.encode('utf-8'))
@record
def test_send_queue_message_unicode_python_33(self):
if sys.version_info < (3,):
return
# Arrange
data = u'receive message啊齄丂狛狜'
sent_msg = Message(data)
self._create_queue(self.queue_name)
# Act
with self.assertRaises(TypeError):
self.sbs.send_queue_message(self.queue_name, sent_msg)
# Assert
@record
def test_unicode_receive_queue_message_unicode_data(self):
        # Arrange
sent_msg = Message(u'receive message啊齄丂狛狜'.encode('utf-8'))
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_unicode_receive_queue_message_binary_data(self):
# Arrange
base64_data = 'AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/wABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8fX5/gIGCg4SFhoeIiYqLjI2Oj5CRkpOUlZaXmJmam5ydnp+goaKjpKWmp6ipqqusra6vsLGys7S1tre4ubq7vL2+v8DBwsPExcbHyMnKy8zNzs/Q0dLT1NXW19jZ2tvc3d7f4OHi4+Tl5ufo6err7O3u7/Dx8vP09fb3+Pn6+/z9/v8AAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+f4CBgoOEhYaHiImKi4yNjo+QkZKTlJWWl5iZmpucnZ6foKGio6SlpqeoqaqrrK2ur7CxsrO0tba3uLm6u7y9vr/AwcLDxMXGx8jJysvMzc7P0NHS09TV1tfY2drb3N3e3+Dh4uPk5ebn6Onq6+zt7u/w8fLz9PX29/j5+vv8/f7/AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w=='
binary_data = base64.b64decode(base64_data)
sent_msg = Message(binary_data)
self._create_queue_and_send_msg(self.queue_name, sent_msg)
# Act
received_msg = self.sbs.receive_queue_message(self.queue_name, False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_unicode_create_subscription_unicode_name(self):
# Arrange
self._create_topic(self.topic_name)
# Act
with self.assertRaises(AzureHttpError):
created = self.sbs.create_subscription(
self.topic_name, u'MySubscription啊齄丂狛狜')
# Assert
@record
def test_unicode_create_rule_unicode_name(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
with self.assertRaises(AzureHttpError):
created = self.sbs.create_rule(
self.topic_name, 'MySubscription', 'MyRule啊齄丂狛狜')
# Assert
@record
def test_send_topic_message_unicode_python_27(self):
'''Test for auto-encoding of unicode text (backwards compatibility).'''
if sys.version_info >= (3,):
return
# Arrange
data = u'receive message啊齄丂狛狜'
sent_msg = Message(data)
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Assert
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', False)
self.assertIsNotNone(received_msg)
self.assertEqual(received_msg.body, data.encode('utf-8'))
@record
def test_send_topic_message_unicode_python_33(self):
if sys.version_info < (3,):
return
# Arrange
data = u'receive message啊齄丂狛狜'
sent_msg = Message(data)
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
# Act
with self.assertRaises(TypeError):
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Assert
@record
def test_unicode_receive_subscription_message_unicode_data(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(u'subscription message啊齄丂狛狜'.encode('utf-8'))
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
@record
def test_unicode_receive_subscription_message_binary_data(self):
# Arrange
base64_data = 'AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/wABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8fX5/gIGCg4SFhoeIiYqLjI2Oj5CRkpOUlZaXmJmam5ydnp+goaKjpKWmp6ipqqusra6vsLGys7S1tre4ubq7vL2+v8DBwsPExcbHyMnKy8zNzs/Q0dLT1NXW19jZ2tvc3d7f4OHi4+Tl5ufo6err7O3u7/Dx8vP09fb3+Pn6+/z9/v8AAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+f4CBgoOEhYaHiImKi4yNjo+QkZKTlJWWl5iZmpucnZ6foKGio6SlpqeoqaqrrK2ur7CxsrO0tba3uLm6u7y9vr/AwcLDxMXGx8jJysvMzc7P0NHS09TV1tfY2drb3N3e3+Dh4uPk5ebn6Onq6+zt7u/w8fLz9PX29/j5+vv8/f7/AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w=='
binary_data = base64.b64decode(base64_data)
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
sent_msg = Message(binary_data)
self.sbs.send_topic_message(self.topic_name, sent_msg)
# Act
received_msg = self.sbs.receive_subscription_message(
self.topic_name, 'MySubscription', False)
# Assert
self.assertIsNotNone(received_msg)
self.assertEqual(sent_msg.body, received_msg.body)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
ohadbc/hwspinlock-next
|
refs/heads/for_arnd
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
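# Test opcodes map to [status-field letter, comparison op, expected value];
# a None expected value means it is taken from the data field of the test line.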
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
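    # Each non-comment line has the form <cmd>:<opcode>:<thread-id>:<data>,
    # e.g. "C: locknowait: 0: 0" (illustrative) issues locknowait to thread 0.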
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
hshindo/POS-Tagging-benchmark
|
refs/heads/master
|
Chainer/ctagger/train.py
|
1
|
from . import nn
from . import util
import os
import logging
import time
import random
import chainer.optimizers as O
import chainer.functions as F
import chainer.links as L
from chainer import Variable
from chainer import cuda
def _log_str(lst):
s = []
for k, v in lst:
s.append(k + ':')
if isinstance(v, float):
v_str = '{:.6f}'.format(v)
else:
v_str = str(v)
s.append(v_str)
return '\t'.join(s)
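# chainer's softmax_cross_entropy averages the loss over the batch by default;
# multiplying by the batch size recovers the summed (unnormalized) loss.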
def _softmax_cross_entropy_no_normalize(y, t):
return F.softmax_cross_entropy(y, t) * y.data.shape[0]
def train(args):
if args.gpu is not None:
cuda.get_device(args.gpu).use()
if args.linear_conv:
assert not args.use_char
os.makedirs(args.model)
# set up logger
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)
log_path = os.path.join(args.model, 'log')
file_handler = logging.FileHandler(log_path)
fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
file_handler.setFormatter(fmt)
logger.addHandler(file_handler)
# set up optimizer
optim_name = args.optim[0]
assert not args.decay_lr or optim_name == 'SGD', 'learning-rate decay is only supported for SGD'
optim_args = map(float, args.optim[1:])
optimizer = getattr(O, optim_name)(*optim_args)
# load data
logger.info('Loading word list...')
ws = set()
with open(args.words) as f:
for line in f:
w = line.rstrip().decode('utf-8')
ws.add(w)
logger.info('Loading training data...')
corpus, vocab_word, vocab_char, vocab_tag = util.load_conll(args.data, args.vocab_size, limit_vocab=ws)
if args.test:
logger.info('Loading test data...')
corpus_test, vocab_word_test, vocab_char_test, vocab_tag_test = util.load_conll(args.test, None, limit_vocab=ws)
for w in vocab_word_test.i2w:
if args.vocab_size is None or vocab_word.size() < args.vocab_size:
vocab_word.add_word(w)
for c in vocab_char_test.i2w:
vocab_char.add_word(c)
for t in vocab_tag_test.i2w:
vocab_tag.add_word(t)
# load pre-trained embeddings
init_emb = None
if args.init_emb:
logger.info('Loading word embeddings...')
init_emb = util.load_init_emb(args.init_emb, args.words, vocab_word)
emb_dim = init_emb.shape[1]
else:
emb_dim = args.word_emb
# create batches
logger.info('Creating batches...')
batches = util.create_batches(corpus, vocab_word, vocab_char, vocab_tag, args.batch,
linear_conv=args.linear_conv, window_size=args.word_window, pad_char=args.pad_char,
gpu=args.gpu, shuffle=not args.no_shuffle)
# set up tagger
tagger = nn.NnTagger(
word_vocab_size=vocab_word.size(), word_emb_dim=emb_dim, word_window_size=args.word_window, word_init_emb=init_emb, word_hidden_dim=args.word_hidden,
use_char=args.use_char, linear_conv=args.linear_conv, pad_char=args.pad_char,
char_vocab_size=vocab_char.size(), char_emb_dim=args.char_emb, char_window_size=args.char_window, char_hidden_dim=args.char_hidden,
tag_num=vocab_tag.size())
classifier = L.Classifier(tagger, lossfun=_softmax_cross_entropy_no_normalize)
initial_lr = None
if args.decay_lr:
initial_lr = optimizer.lr
# set up GPU
if args.gpu is not None:
classifier.to_gpu()
optimizer.setup(classifier)
# create directory
vocab_word.save(os.path.join(args.model, 'vocab_word'))
vocab_char.save(os.path.join(args.model, 'vocab_char'))
vocab_tag.save(os.path.join(args.model, 'vocab_tag'))
total_time = 0.
# training loop
for n in range(args.epoch):
epoch_begin = time.time()
# decay learning rate
if args.decay_lr:
optimizer.lr = initial_lr / (n + 1)
logger.info('Learning rate set to: {}'.format(optimizer.lr))
if not args.no_shuffle:
random.shuffle(batches)
for i, ((word_ids_data, (char_ids_data, char_boundaries)), t_data) in enumerate(batches):
batch_size, batch_length = word_ids_data.shape[:2]
time_start = time.time()
word_ids = Variable(word_ids_data)
char_ids = Variable(char_ids_data)
t = Variable(t_data)
batch = word_ids, (char_ids, char_boundaries)
optimizer.update(classifier, batch, t)
time_end = time.time()
time_delta = time_end - time_start
logger.info(_log_str([
('epoch', n),
('batch', i),
('loss', float(classifier.loss.data)),
('acc', float(classifier.accuracy.data)),
('size', batch_size),
('len', batch_length),
('time', int(time_delta * 1000)),
]))
epoch_end = time.time()
total_time += epoch_end - epoch_begin
# save current model
dest_path = os.path.join(args.model, 'epoch' + str(n))
tagger.save(dest_path)
logger.info('Training done.')
logger.info('Total time: {} sec'.format(total_time))
|
OmkarPathak/Python-Programs
|
refs/heads/master
|
CompetitiveProgramming/HackerEarth/Basics_Of_Input_Output/P03_FindProduct.py
|
1
|
# You have been given an array A of size N consisting of positive integers. You need to find and print the product of all
# the numbers in this array modulo (10^9+7).
#
# Input Format:
# The first line contains a single integer N denoting the size of the array. The next line contains N space separated
# integers denoting the elements of the array
#
# Output Format:
# Print a single integer denoting the product of all the elements of the array modulo (10^9+7)
#
# Constraints:
# 1 ≤ N ≤ 10^3
# 1 ≤ A[i] ≤ 10^3
# Sample Input:
# 5
# 1 2 3 4 5
#
# Sample Output:
# 120
modulo = (10 ** 9 + 7)
result = 1
testCases = int(input())
List = [int(i) for i in input().split()]
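# Reduce modulo 10^9+7 after every multiplication; Python ints are unbounded,
# but this keeps the intermediate product small and the loop fast.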
for i in List:
result = result * i % modulo
print(result)
|
acenario/Payable
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/auth/tests/__init__.py
|
12133432
| |
srilatha44/threepress
|
refs/heads/master
|
bookworm/library/test-data/__init__.py
|
12133432
| |
vuntz/glance
|
refs/heads/master
|
glance/contrib/plugins/image_artifact/__init__.py
|
12133432
| |
mdrumond/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tpu/profiler/pip_package/setup.py
|
20
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Cloud TPU profiler package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
_VERSION = '1.3.0-a1'
CONSOLE_SCRIPTS = [
'capture_tpu_profile=cloud_tpu_profiler.main:main',
]
REQUIRED_PACKAGES = [
'tensorflow >= 1.2.0',
]
setup(
name='cloud_tpu_profiler',
version=_VERSION.replace('-', ''),
description='Trace and profile Cloud TPU performance',
    long_description='Tools for capturing TPU profiles',
url='https://www.tensorflow.org/tfrc/',
author='Google Inc.',
author_email='opensource@google.com',
packages=['cloud_tpu_profiler'],
package_data={
'cloud_tpu_profiler': ['data/*'],
},
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
install_requires=REQUIRED_PACKAGES,
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow performance tpu',)
|
teochenglim/ansible-modules-extras
|
refs/heads/devel
|
cloud/lxd/lxd_container.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: lxd_container
short_description: Manage LXD Containers
version_added: "2.2"
description:
- Management of LXD containers
author: "Hiroaki Nakamura (@hnakamur)"
options:
name:
description:
- Name of a container.
required: true
architecture:
description:
      - The architecture for the container (e.g. "x86_64" or "i686").
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
required: false
config:
description:
- 'The config for the container (e.g. {"limits.cpu": "2"}).
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
      - If the container already exists and its "config" values in the metadata
        obtained from
        GET /1.0/containers/<name>
        U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
        are different, then this module tries to apply the configurations.
      - Keys starting with 'volatile.' are ignored for this comparison.
      - Not all config values can be applied to an existing container.
        You may need to delete and recreate the container.
required: false
devices:
description:
- 'The devices for the container
(e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
required: false
ephemeral:
description:
- Whether or not the container is ephemeral (e.g. true or false).
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
required: false
source:
description:
- 'The source for the container
(e.g. { "type": "image",
"mode": "pull",
"server": "https://images.linuxcontainers.org",
"protocol": "lxd",
"alias": "ubuntu/xenial/amd64" }).
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
required: false
state:
choices:
- started
- stopped
- restarted
- absent
- frozen
description:
- Define the state of a container.
required: false
default: started
timeout:
description:
- A timeout for changing the state of the container.
      - This is also used as a timeout for waiting until IPv4 addresses
        are set on all network interfaces in the container after
        starting or restarting.
required: false
default: 30
wait_for_ipv4_addresses:
description:
      - If this is true, the M(lxd_container) module waits until IPv4
        addresses are set on all network interfaces in the container after
        starting or restarting.
required: false
default: false
force_stop:
description:
      - If this is true, the M(lxd_container) module forcefully stops the
        container when stopping or restarting it.
required: false
default: false
url:
description:
- The unix domain socket path or the https URL for the LXD server.
required: false
default: unix:/var/lib/lxd/unix.socket
key_file:
description:
- The client certificate key file path.
required: false
default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
cert_file:
description:
- The client certificate file path.
required: false
default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
trust_password:
description:
- The client trusted password.
- You need to set this password on the LXD server before
running this module using the following command.
lxc config set core.trust_password <some random password>
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
      - If trust_password is set, this module sends an authentication
        request before sending any other requests.
required: false
notes:
  - Containers must have a unique name. If you attempt to create a container
    with a name that already exists in the user's namespace, the module will
    simply return as "unchanged".
  - There are two ways to run commands in containers: using the command
    module, or using the Ansible lxd connection plugin bundled with Ansible >=
    2.1. The latter requires Python to be installed in the container, which
    can be done with the command module.
- You can copy a file from the host to the container
    with the Ansible M(copy) and M(template) modules and the `lxd` connection plugin.
See the example below.
  - You can copy a file from the created container to localhost
with `command=lxc file pull container_name/dir/filename filename`.
See the first example below.
'''
EXAMPLES = '''
# An example for creating a Ubuntu container and install python
- hosts: localhost
connection: local
tasks:
- name: Create a started container
lxd_container:
name: mycontainer
state: started
source:
type: image
mode: pull
server: https://images.linuxcontainers.org
protocol: lxd
alias: ubuntu/xenial/amd64
profiles: ["default"]
wait_for_ipv4_addresses: true
timeout: 600
- name: check python is installed in container
delegate_to: mycontainer
raw: dpkg -s python
register: python_install_check
failed_when: python_install_check.rc not in [0, 1]
changed_when: false
- name: install python in container
delegate_to: mycontainer
raw: apt-get install -y python
when: python_install_check.rc == 1
# An example for deleting a container
- hosts: localhost
  connection: local
  tasks:
    - name: Delete a container
      lxd_container:
        name: mycontainer
        state: absent
# An example for restarting a container
- hosts: localhost
connection: local
tasks:
- name: Restart a container
lxd_container:
name: mycontainer
state: restarted
# An example for restarting a container using https to connect to the LXD server
- hosts: localhost
connection: local
tasks:
- name: Restart a container
lxd_container:
url: https://127.0.0.1:8443
# These cert_file and key_file values are equal to the default values.
#cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
#key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
trust_password: mypassword
name: mycontainer
state: restarted
# Note your container must be in the inventory for the below example.
#
# [containers]
# mycontainer ansible_connection=lxd
#
- hosts:
- mycontainer
tasks:
- name: copy /etc/hosts in the created container to localhost with name "mycontainer-hosts"
fetch:
src: /etc/hosts
dest: /tmp/mycontainer-hosts
flat: true
'''
RETURN='''
addresses:
description: Mapping from the network device name to a list of IPv4 addresses in the container
returned: when state is started or restarted
type: object
sample: {"eth0": ["10.155.92.191"]}
old_state:
description: The old state of the container
returned: when state is started or restarted
type: string
sample: "stopped"
logs:
description: The logs of requests and responses.
returned: when ansible-playbook is invoked with -vvvv.
type: list
sample: "(too long to be placed here)"
actions:
description: List of actions performed for the container.
returned: success
type: list
sample: '["create", "start"]'
'''
import datetime
import os
import time
from ansible.module_utils.lxd import LXDClient, LXDClientException
# LXD_ANSIBLE_STATES maps each desired state to the name of the method
# invoked when that state is requested.
LXD_ANSIBLE_STATES = {
'started': '_started',
'stopped': '_stopped',
'restarted': '_restarted',
'absent': '_destroyed',
'frozen': '_frozen'
}
# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible
# lxc_container module state parameter value.
ANSIBLE_LXD_STATES = {
'Running': 'started',
'Stopped': 'stopped',
'Frozen': 'frozen',
}
# CONFIG_PARAMS is a list of config attribute names.
CONFIG_PARAMS = [
'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
]
try:
callable(all)
except NameError:
# For python <2.5
# This definition is copied from https://docs.python.org/2/library/functions.html#all
def all(iterable):
for element in iterable:
if not element:
return False
return True
class LXDContainerManagement(object):
def __init__(self, module):
"""Management of LXC containers via Ansible.
:param module: Processed Ansible Module.
:type module: ``object``
"""
self.module = module
self.name = self.module.params['name']
self._build_config()
self.state = self.module.params['state']
self.timeout = self.module.params['timeout']
self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
self.force_stop = self.module.params['force_stop']
self.addresses = None
self.url = self.module.params['url']
self.key_file = self.module.params.get('key_file', None)
self.cert_file = self.module.params.get('cert_file', None)
self.debug = self.module._verbosity >= 4
try:
self.client = LXDClient(
self.url, key_file=self.key_file, cert_file=self.cert_file,
debug=self.debug
)
except LXDClientException as e:
self.module.fail_json(msg=e.msg)
self.trust_password = self.module.params.get('trust_password', None)
self.actions = []
def _build_config(self):
self.config = {}
for attr in CONFIG_PARAMS:
param_val = self.module.params.get(attr, None)
if param_val is not None:
self.config[attr] = param_val
def _get_container_json(self):
return self.client.do(
'GET', '/1.0/containers/{0}'.format(self.name),
ok_error_codes=[404]
)
def _get_container_state_json(self):
return self.client.do(
'GET', '/1.0/containers/{0}/state'.format(self.name),
ok_error_codes=[404]
)
@staticmethod
def _container_json_to_module_state(resp_json):
if resp_json['type'] == 'error':
return 'absent'
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
def _change_state(self, action, force_stop=False):
body_json={'action': action, 'timeout': self.timeout}
if force_stop:
body_json['force'] = True
return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
def _create_container(self):
config = self.config.copy()
config['name'] = self.name
self.client.do('POST', '/1.0/containers', config)
self.actions.append('create')
def _start_container(self):
self._change_state('start')
self.actions.append('start')
def _stop_container(self):
self._change_state('stop', self.force_stop)
self.actions.append('stop')
def _restart_container(self):
self._change_state('restart', self.force_stop)
self.actions.append('restart')
def _delete_container(self):
self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
self.actions.append('delete')
def _freeze_container(self):
self._change_state('freeze')
self.actions.append('freeze')
def _unfreeze_container(self):
self._change_state('unfreeze')
        self.actions.append('unfreeze')
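    # Returns a {device: [ipv4, ...]} mapping from the container state,
    # skipping the devices listed in ignore_devices (loopback by default).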
def _container_ipv4_addresses(self, ignore_devices=['lo']):
resp_json = self._get_container_state_json()
network = resp_json['metadata']['network'] or {}
network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
return addresses
@staticmethod
def _has_all_ipv4_addresses(addresses):
return len(addresses) > 0 and all([len(v) > 0 for v in addresses.itervalues()])
def _get_addresses(self):
try:
due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
while datetime.datetime.now() < due:
time.sleep(1)
addresses = self._container_ipv4_addresses()
if self._has_all_ipv4_addresses(addresses):
self.addresses = addresses
return
except LXDClientException as e:
e.msg = 'timeout for getting IPv4 addresses'
raise
def _started(self):
if self.old_state == 'absent':
self._create_container()
self._start_container()
else:
if self.old_state == 'frozen':
self._unfreeze_container()
elif self.old_state == 'stopped':
self._start_container()
if self._needs_to_apply_container_configs():
self._apply_container_configs()
if self.wait_for_ipv4_addresses:
self._get_addresses()
def _stopped(self):
if self.old_state == 'absent':
self._create_container()
else:
if self.old_state == 'stopped':
if self._needs_to_apply_container_configs():
self._start_container()
self._apply_container_configs()
self._stop_container()
else:
if self.old_state == 'frozen':
self._unfreeze_container()
if self._needs_to_apply_container_configs():
self._apply_container_configs()
self._stop_container()
def _restarted(self):
if self.old_state == 'absent':
self._create_container()
self._start_container()
else:
if self.old_state == 'frozen':
self._unfreeze_container()
if self._needs_to_apply_container_configs():
self._apply_container_configs()
self._restart_container()
if self.wait_for_ipv4_addresses:
self._get_addresses()
def _destroyed(self):
if self.old_state != 'absent':
if self.old_state == 'frozen':
self._unfreeze_container()
if self.old_state != 'stopped':
self._stop_container()
self._delete_container()
def _frozen(self):
if self.old_state == 'absent':
self._create_container()
self._start_container()
self._freeze_container()
else:
if self.old_state == 'stopped':
self._start_container()
if self._needs_to_apply_container_configs():
self._apply_container_configs()
self._freeze_container()
def _needs_to_change_container_config(self, key):
if key not in self.config:
return False
if key == 'config':
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
else:
old_configs = self.old_container_json['metadata'][key]
return self.config[key] != old_configs
def _needs_to_apply_container_configs(self):
return (
self._needs_to_change_container_config('architecture') or
self._needs_to_change_container_config('config') or
self._needs_to_change_container_config('ephemeral') or
self._needs_to_change_container_config('devices') or
self._needs_to_change_container_config('profiles')
)
def _apply_container_configs(self):
old_metadata = self.old_container_json['metadata']
body_json = {
'architecture': old_metadata['architecture'],
'config': old_metadata['config'],
'devices': old_metadata['devices'],
'profiles': old_metadata['profiles']
}
if self._needs_to_change_container_config('architecture'):
body_json['architecture'] = self.config['architecture']
if self._needs_to_change_container_config('config'):
for k, v in self.config['config'].items():
body_json['config'][k] = v
if self._needs_to_change_container_config('ephemeral'):
body_json['ephemeral'] = self.config['ephemeral']
if self._needs_to_change_container_config('devices'):
body_json['devices'] = self.config['devices']
if self._needs_to_change_container_config('profiles'):
body_json['profiles'] = self.config['profiles']
self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
self.actions.append('apply_container_configs')
def run(self):
"""Run the main method."""
try:
if self.trust_password is not None:
self.client.authenticate(self.trust_password)
self.old_container_json = self._get_container_json()
self.old_state = self._container_json_to_module_state(self.old_container_json)
action = getattr(self, LXD_ANSIBLE_STATES[self.state])
action()
state_changed = len(self.actions) > 0
result_json = {
'log_verbosity': self.module._verbosity,
'changed': state_changed,
'old_state': self.old_state,
'actions': self.actions
}
if self.client.debug:
result_json['logs'] = self.client.logs
if self.addresses is not None:
result_json['addresses'] = self.addresses
self.module.exit_json(**result_json)
except LXDClientException as e:
state_changed = len(self.actions) > 0
fail_params = {
'msg': e.msg,
'changed': state_changed,
'actions': self.actions
}
if self.client.debug:
fail_params['logs'] = e.kwargs['logs']
self.module.fail_json(**fail_params)
def main():
"""Ansible Main module."""
module = AnsibleModule(
argument_spec=dict(
name=dict(
type='str',
required=True
),
architecture=dict(
type='str',
),
config=dict(
type='dict',
),
description=dict(
type='str',
),
devices=dict(
type='dict',
),
ephemeral=dict(
type='bool',
),
profiles=dict(
type='list',
),
source=dict(
type='dict',
),
state=dict(
choices=LXD_ANSIBLE_STATES.keys(),
default='started'
),
timeout=dict(
type='int',
default=30
),
wait_for_ipv4_addresses=dict(
type='bool',
default=False
),
force_stop=dict(
type='bool',
default=False
),
url=dict(
type='str',
default='unix:/var/lib/lxd/unix.socket'
),
key_file=dict(
type='str',
default='{}/.config/lxc/client.key'.format(os.environ['HOME'])
),
cert_file=dict(
type='str',
default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
),
trust_password=dict(
type='str',
)
),
supports_check_mode=False,
)
lxd_manage = LXDContainerManagement(module=module)
lxd_manage.run()
# import module bits
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
superdesk/superdesk-core
|
refs/heads/develop
|
content_api/packages_versions/__init__.py
|
1
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
from .service import PackagesVersionsService
from .resource import PackagesVersionsResource
def init_app(app) -> None:
endpoint_name = "packages_versions"
service = PackagesVersionsService(endpoint_name, backend=superdesk.get_backend())
PackagesVersionsResource(endpoint_name, app=app, service=service)
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/refactoring/rename/renameShadowingVariable_after.py
|
83
|
def lab(): pass
bar = 1
print(bar)
|
Jesus89/apio
|
refs/heads/develop
|
setup.py
|
1
|
# -*- coding: utf-8 -*-
import os
import json
from setuptools import setup
from apio import (__author__, __description__, __email__, __license__,
__title__, __url__, __version__)
# Load extras_require
extras_require = {}
filepath = os.path.join('apio', 'resources', 'distribution.json')
with open(filepath, 'r') as f:
resource = json.loads(f.read())
pip_packages = resource.get('pip_packages', {})
extras_require = {k: [k + v] for k, v in pip_packages.items()}
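# Each extra maps a package name to name-plus-version-spec from
# distribution.json, e.g. {'scons': ['scons==3.0.1']} (illustrative values).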
setup(
name=__title__,
version=__version__,
description=__description__,
author=__author__,
author_email=__email__,
url=__url__,
license=__license__,
packages=['apio'],
package_data={
'apio': ['commands/*.py',
'managers/*.py',
'resources/*']
},
install_requires=[
'click>=5,<7',
'semantic_version>=2.5.0,<3',
'requests>=2.4.0,<3',
'pyjwt>=1.5.3,<2',
'colorama',
'pyserial>=3,<4'
],
extras_require=extras_require,
entry_points={
'console_scripts': ['apio=apio.__main__:cli']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3']
)
|
henriknelson/micropython
|
refs/heads/master
|
tests/basics/floordivide_intbig.py
|
45
|
# check floor division matches python definition
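# Python floor division rounds toward negative infinity (e.g. -7 // 2 == -4),
# so the sign combinations below exercise every rounding direction.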
a = 987654321987987987987987987987
b = 19
print(a // b)
print(a // -b)
print(-a // b)
print(-a // -b)
a = 10000000000000000000000000000000000000000000
b = 100
print(a // b)
print(a // -b)
print(-a // b)
print(-a // -b)
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/completion/className/function/function.after.py
|
83
|
from mypackage import shazam
shazam()
|
sharpdeep/pyspider
|
refs/heads/master
|
pyspider/libs/utils.py
|
53
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2012-11-06 11:50:13
import logging
import hashlib
import datetime
import base64
import six
from six import iteritems
md5string = lambda x: hashlib.md5(utf8(x)).hexdigest()
class ReadOnlyDict(dict):
"""A Read Only Dict"""
def __setitem__(self, key, value):
raise Exception("dict is read-only")
def getitem(obj, key=0, default=None):
"""Get first element of list or return default"""
try:
return obj[key]
except:
return default
def hide_me(tb, g=globals()):
"""Hide stack traceback of given stack"""
base_tb = tb
try:
while tb and tb.tb_frame.f_globals is not g:
tb = tb.tb_next
while tb and tb.tb_frame.f_globals is g:
tb = tb.tb_next
except Exception as e:
logging.exception(e)
tb = base_tb
if not tb:
tb = base_tb
return tb
def run_in_thread(func, *args, **kwargs):
"""Run function in thread, return a Thread object"""
from threading import Thread
thread = Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def run_in_subprocess(func, *args, **kwargs):
"""Run function in subprocess, return a Process object"""
from multiprocessing import Process
thread = Process(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
From tornado
"""
if not date:
return '-'
if isinstance(date, float) or isinstance(date, int):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
            # Due to clock skew, some things are slightly
            # in the future. Round timestamps in the immediate
            # future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return ("1 second ago" if seconds <= 1 else
"%(seconds)d seconds ago") % {"seconds": seconds}
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return ("1 minute ago" if minutes <= 1 else
"%(minutes)d minutes ago") % {"minutes": minutes}
hours = round(seconds / (60.0 * 60))
return ("1 hour ago" if hours <= 1 else
"%(hours)d hours ago") % {"hours": hours}
if days == 0:
format = "%(time)s"
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = "yesterday" if shorter else "yesterday at %(time)s"
elif days < 5:
format = "%(weekday)s" if shorter else "%(weekday)s at %(time)s"
elif days < 334: # 11mo, since confusing for same month last year
format = "%(month_name)s-%(day)s" if shorter else \
"%(month_name)s-%(day)s at %(time)s"
if format is None:
format = "%(month_name)s %(day)s, %(year)s" if shorter else \
"%(month_name)s %(day)s, %(year)s at %(time)s"
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
return format % {
"month_name": local_date.month - 1,
"weekday": local_date.weekday(),
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
class TimeoutError(Exception):
pass
try:
import signal
if not hasattr(signal, 'SIGALRM'):
raise ImportError('signal')
class timeout:
"""
Time limit of command
with timeout(3):
time.sleep(10)
"""
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
if self.seconds:
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
if self.seconds:
signal.alarm(0)
except ImportError:
class timeout:
"""
Time limit of command (for windows)
"""
def __init__(self, seconds=1, error_message='Timeout'):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def utf8(string):
"""
    Make sure string is utf8-encoded bytes.
    If the parameter is an object, object.__str__ will be called before encoding to bytes.
"""
if isinstance(string, six.text_type):
return string.encode('utf8')
elif isinstance(string, six.binary_type):
return string
else:
return six.text_type(string).encode('utf8')
def text(string, encoding='utf8'):
"""
    Make sure string is of unicode type; decode with the given encoding if it's not.
    If the parameter is an object, object.__str__ will be called.
"""
if isinstance(string, six.text_type):
return string
elif isinstance(string, six.binary_type):
return string.decode(encoding)
else:
return six.text_type(string)
def pretty_unicode(string):
"""
    Make sure string is unicode; try to decode with utf8, or return a unicode-escaped string if that fails.
"""
if isinstance(string, six.text_type):
return string
try:
return string.decode("utf8")
except UnicodeDecodeError:
return string.decode('Latin-1').encode('unicode_escape')
def unicode_string(string):
"""
    Make sure string is unicode; try to decode with utf8, or base64-encode it if that fails.
    Can be decoded by `decode_unicode_string`.
"""
if isinstance(string, six.text_type):
return string
try:
return string.decode("utf8")
except UnicodeDecodeError:
return '[BASE64-DATA]' + base64.b64encode(string) + '[/BASE64-DATA]'
def unicode_dict(_dict):
"""
Make sure keys and values of dict is unicode.
"""
r = {}
for k, v in iteritems(_dict):
r[unicode_string(k)] = unicode_obj(v)
return r
def unicode_list(_list):
"""
    Make sure every element in the list is unicode. bytes will be encoded in base64.
"""
return [unicode_obj(x) for x in _list]
def unicode_obj(obj):
"""
    Make sure keys and values of a dict/list/tuple are unicode. bytes will be encoded in base64.
    Can be decoded by `decode_unicode_obj`.
"""
if isinstance(obj, dict):
return unicode_dict(obj)
elif isinstance(obj, (list, tuple)):
return unicode_list(obj)
elif isinstance(obj, six.string_types):
return unicode_string(obj)
elif isinstance(obj, (int, float)):
return obj
elif obj is None:
return obj
else:
try:
return text(obj)
except:
return text(repr(obj))
def decode_unicode_string(string):
"""
Decode string encoded by `unicode_string`
"""
if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'):
return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')])
return string
def decode_unicode_obj(obj):
"""
Decode unicoded dict/list/tuple encoded by `unicode_obj`
"""
if isinstance(obj, dict):
r = {}
for k, v in iteritems(obj):
r[decode_unicode_string(k)] = decode_unicode_obj(v)
return r
elif isinstance(obj, six.string_types):
return decode_unicode_string(obj)
elif isinstance(obj, (list, tuple)):
return [decode_unicode_obj(x) for x in obj]
else:
return obj
class Get(object):
"""
    Lazy value calculation for an object.
"""
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter()
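# Illustrative use: ObjectDict(now=Get(lambda: time.time())).now invokes the
# getter on attribute access, so each lookup yields a fresh value.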
class ObjectDict(dict):
"""
    Dict-like object: every dict[key] can be accessed as dict.key.
    If dict[key] is a `Get`, its value is calculated on access.
"""
def __getattr__(self, name):
ret = self.__getitem__(name)
if hasattr(ret, '__get__'):
return ret.__get__(self, ObjectDict)
return ret
def load_object(name):
"""Load object from module"""
if "." not in name:
        raise Exception("load_object needs a 'module.object' path")
module_name, object_name = name.rsplit('.', 1)
if six.PY2:
module = __import__(module_name, globals(), locals(), [utf8(object_name)], -1)
else:
module = __import__(module_name, globals(), locals(), [object_name])
return getattr(module, object_name)
def get_python_console(namespace=None):
"""
    Return an interactive python console instance with the caller's stack
"""
if namespace is None:
import inspect
frame = inspect.currentframe()
caller = frame.f_back
if not caller:
            logging.error("can't find the caller who started this console.")
caller = frame
namespace = dict(caller.f_globals)
namespace.update(caller.f_locals)
try:
from IPython.terminal.interactiveshell import TerminalInteractiveShell
shell = TerminalInteractiveShell(user_ns=namespace)
except ImportError:
try:
import readline
import rlcompleter
readline.set_completer(rlcompleter.Completer(namespace).complete)
readline.parse_and_bind("tab: complete")
except ImportError:
pass
import code
shell = code.InteractiveConsole(namespace)
shell._quit = False
def exit():
shell._quit = True
def readfunc(prompt=""):
if shell._quit:
raise EOFError
return six.moves.input(prompt)
# inject exit method
shell.ask_exit = exit
shell.raw_input = readfunc
return shell
def python_console(namespace=None):
"""Start a interactive python console with caller's stack"""
if namespace is None:
import inspect
frame = inspect.currentframe()
caller = frame.f_back
if not caller:
            logging.error("can't find the caller who started this console.")
caller = frame
namespace = dict(caller.f_globals)
namespace.update(caller.f_locals)
return get_python_console(namespace=namespace).interact()
|
cpennington/edx-platform
|
refs/heads/master
|
openedx/features/course_duration_limits/tests/__init__.py
|
12133432
| |
japeto/Vigtech-Services
|
refs/heads/master
|
env/lib/python2.7/site-packages/pip/_vendor/pkg_resources/tests/__init__.py
|
12133432
| |
etkirsch/legends-of-erukar
|
refs/heads/master
|
erukar/content/enemies/human/VelmyreanCleric.py
|
12133432
| |
slyphon/pants
|
refs/heads/master
|
contrib/cpp/src/python/pants/contrib/cpp/__init__.py
|
12133432
| |
byt3bl33d3r/CrackMapExec
|
refs/heads/master
|
cme/protocols/ssh/__init__.py
|
12133432
| |
postlund/home-assistant
|
refs/heads/dev
|
homeassistant/components/eliqonline/__init__.py
|
36
|
"""The eliqonline component."""
|
blackmiaool/rt-thread
|
refs/heads/master
|
tools/buildbot.py
|
39
|
import os
import sys
def usage():
    print('%s all -- build all bsp' % os.path.basename(sys.argv[0]))
    print('%s clean -- clean all bsp' % os.path.basename(sys.argv[0]))
    print('%s project -- update all project files' % os.path.basename(sys.argv[0]))
BSP_ROOT = '../bsp'
if len(sys.argv) != 2:
usage()
sys.exit(0)
# get command options
command = ''
if sys.argv[1] == 'all':
command = ' '
elif sys.argv[1] == 'clean':
command = ' -c'
elif sys.argv[1] == 'project':
command = ' --target=mdk -s'
projects = os.listdir(BSP_ROOT)
for item in projects:
project_dir = os.path.join(BSP_ROOT, item)
if os.path.isfile(os.path.join(project_dir, 'template.uvproj')):
print ('prepare MDK project file on ' + project_dir)
os.system('scons --directory=' + project_dir + command)
sys.exit(0)
else:
usage()
sys.exit(0)
projects = os.listdir(BSP_ROOT)
for item in projects:
project_dir = os.path.join(BSP_ROOT, item)
if os.path.isfile(os.path.join(project_dir, 'SConstruct')):
if os.system('scons --directory=' + project_dir + command) != 0:
            print('build failed!!')
break
|
hcsturix74/django
|
refs/heads/master
|
tests/model_fields/test_uuid.py
|
81
|
import json
import uuid
from django.core import exceptions, serializers
from django.db import models
from django.test import SimpleTestCase, TestCase
from .models import (
NullableUUIDModel, PrimaryKeyUUIDModel, RelatedToUUIDModel, UUIDGrandchild,
UUIDModel,
)
class TestSaveLoad(TestCase):
def test_uuid_instance(self):
instance = UUIDModel.objects.create(field=uuid.uuid4())
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, instance.field)
def test_str_instance_no_hyphens(self):
UUIDModel.objects.create(field='550e8400e29b41d4a716446655440000')
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_str_instance_hyphens(self):
UUIDModel.objects.create(field='550e8400-e29b-41d4-a716-446655440000')
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_str_instance_bad_hyphens(self):
UUIDModel.objects.create(field='550e84-00-e29b-41d4-a716-4-466-55440000')
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_null_handling(self):
NullableUUIDModel.objects.create(field=None)
loaded = NullableUUIDModel.objects.get()
self.assertEqual(loaded.field, None)
def test_pk_validated(self):
with self.assertRaisesMessage(TypeError, 'is not a valid UUID'):
PrimaryKeyUUIDModel.objects.get(pk={})
with self.assertRaisesMessage(TypeError, 'is not a valid UUID'):
PrimaryKeyUUIDModel.objects.get(pk=[])
def test_wrong_value(self):
self.assertRaisesMessage(
ValueError, 'badly formed hexadecimal UUID string',
UUIDModel.objects.get, field='not-a-uuid')
self.assertRaisesMessage(
ValueError, 'badly formed hexadecimal UUID string',
UUIDModel.objects.create, field='not-a-uuid')
class TestMigrations(SimpleTestCase):
def test_deconstruct(self):
field = models.UUIDField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(kwargs, {})
class TestQuerying(TestCase):
def setUp(self):
self.objs = [
NullableUUIDModel.objects.create(field=uuid.uuid4()),
NullableUUIDModel.objects.create(field='550e8400e29b41d4a716446655440000'),
NullableUUIDModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__exact='550e8400e29b41d4a716446655440000'),
[self.objs[1]]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__isnull=True),
[self.objs[2]]
)
class TestSerialization(SimpleTestCase):
test_data = '[{"fields": {"field": "550e8400-e29b-41d4-a716-446655440000"}, "model": "model_fields.uuidmodel", "pk": null}]'
def test_dumping(self):
instance = UUIDModel(field=uuid.UUID('550e8400e29b41d4a716446655440000'))
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, uuid.UUID('550e8400-e29b-41d4-a716-446655440000'))
class TestValidation(SimpleTestCase):
def test_invalid_uuid(self):
field = models.UUIDField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('550e8400', None)
self.assertEqual(cm.exception.code, 'invalid')
self.assertEqual(cm.exception.message % cm.exception.params, "'550e8400' is not a valid UUID.")
def test_uuid_instance_ok(self):
field = models.UUIDField()
field.clean(uuid.uuid4(), None) # no error
class TestAsPrimaryKey(TestCase):
def test_creation(self):
PrimaryKeyUUIDModel.objects.create()
loaded = PrimaryKeyUUIDModel.objects.get()
self.assertIsInstance(loaded.pk, uuid.UUID)
def test_uuid_pk_on_save(self):
saved = PrimaryKeyUUIDModel.objects.create(id=None)
loaded = PrimaryKeyUUIDModel.objects.get()
self.assertIsNotNone(loaded.id, None)
self.assertEqual(loaded.id, saved.id)
def test_uuid_pk_on_bulk_create(self):
u1 = PrimaryKeyUUIDModel()
u2 = PrimaryKeyUUIDModel(id=None)
PrimaryKeyUUIDModel.objects.bulk_create([u1, u2])
# Check that the two objects were correctly created.
u1_found = PrimaryKeyUUIDModel.objects.filter(id=u1.id).exists()
u2_found = PrimaryKeyUUIDModel.objects.exclude(id=u1.id).exists()
self.assertTrue(u1_found)
self.assertTrue(u2_found)
self.assertEqual(PrimaryKeyUUIDModel.objects.count(), 2)
def test_underlying_field(self):
pk_model = PrimaryKeyUUIDModel.objects.create()
RelatedToUUIDModel.objects.create(uuid_fk=pk_model)
related = RelatedToUUIDModel.objects.get()
self.assertEqual(related.uuid_fk.pk, related.uuid_fk_id)
def test_update_with_related_model_instance(self):
# regression for #24611
u1 = PrimaryKeyUUIDModel.objects.create()
u2 = PrimaryKeyUUIDModel.objects.create()
r = RelatedToUUIDModel.objects.create(uuid_fk=u1)
RelatedToUUIDModel.objects.update(uuid_fk=u2)
r.refresh_from_db()
self.assertEqual(r.uuid_fk, u2)
def test_update_with_related_model_id(self):
u1 = PrimaryKeyUUIDModel.objects.create()
u2 = PrimaryKeyUUIDModel.objects.create()
r = RelatedToUUIDModel.objects.create(uuid_fk=u1)
RelatedToUUIDModel.objects.update(uuid_fk=u2.pk)
r.refresh_from_db()
self.assertEqual(r.uuid_fk, u2)
def test_two_level_foreign_keys(self):
# exercises ForeignKey.get_db_prep_value()
UUIDGrandchild().save()
|
lz1988/django-web2015
|
refs/heads/master
|
tests/regressiontests/staticfiles_tests/urls/default.py
|
176
|
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^static/(?P<path>.*)$', 'django.contrib.staticfiles.views.serve'),
)
|
CDNoyes/EDL-Py
|
refs/heads/master
|
EntryGuidance/chebyshevPDG.py
|
1
|
import numpy as np
from numpy import sin, cos, pi
import matplotlib.pyplot as plt
# from scipy.optimize import minimize, differential_evolution
import pyOpt
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d
def ChebyshevDiff(n):
    # Returns the (sorted) Chebyshev differentiation matrix D with n nodes,
    # using the collocation points t.
    #
    # Unsorted collocation points are used to compute the standard
    # differentiation matrix, then the matrix is converted to its sorted form.
    # See Ross, Fahroo (2002) for details.
    t = cos(pi*np.arange(0,n+1)/n)  # unsorted collocation points
D = np.empty((n+1,n+1))
for j in range(n+1):
for k in range(n+1):
if j == 0 and j == k:
D[k,j] = (2*n**2+1)/6.
elif j == n and j ==k:
D[k,j] = -(2*n**2+1)/6.
elif j == k:
D[k,j] = -t[k]/2./(1-t[k]**2)
else:
if k == 0 or k == n:
ck = 2.
else:
ck = 1.
if j == 0 or j == n:
cj = 2.
else:
cj = 1.
D[k,j] = (ck/cj)*( ( (-1)**(j+k) ) / (t[k]-t[j]) )
return -D
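# Illustrative sanity check (hypothetical helper, not called anywhere): on the
# sorted Chebyshev points tau_k = -cos(pi*k/n), ChebyshevDiff(n) differentiates
# polynomials of degree <= n exactly.
def _chebyshev_diff_check(n=4):
    D = ChebyshevDiff(n)
    tau = -cos(pi*np.arange(0, n+1)/n)  # sorted collocation points on [-1, 1]
    residual = np.dot(D, tau**2) - 2.0*tau  # d/dtau of tau^2 is 2*tau
    return np.max(np.abs(residual))  # should be near machine precision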
def Cost(c,problem):
g = 3.7
ve = problem['ve']
D = problem['D']
N = problem['N']
x = np.hstack((problem['x0'], c[0:N], problem['xf']))
y = np.hstack((problem['y0'], c[N:N*2], problem['yf']))
tf = c[-1]
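    # Time map: tau in [-1, 1] -> t = (tau + 1)*tf/2 in [0, tf]; by the chain
    # rule d/dt = (2/tf) d/dtau, hence the rescaling of D below.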
D = D*2/tf
tol = 1
tau = -cos(pi*np.arange(0,N+2)/(N+1))
t = (tau+1)*0.5*tf
u = np.dot(D,x)
v = np.dot(D,y)
udot = np.dot(D,u)
vdot = np.dot(D,v)
if np.any(np.iscomplex(vdot)) or np.any(np.iscomplex(udot)):
mu = np.arctan((vdot+g)/udot)
else:
mu = np.arctan2(vdot+g,udot)
mudot = np.dot(D,mu)
r = -(g+vdot)/(ve*sin(mu))
m = problem['m0']*np.exp(cumtrapz(r,t,initial=0))
T = m*(udot)/(cos(mu))
T = interp1d(tau,T)(np.linspace(-1,1,problem['nConstraint'])) # Interpolate to finer grid for better constraint satisfaction
mudot = interp1d(tau,mudot)(np.linspace(-1,1,problem['nConstraint'])) # Interpolate to finer grid for better constraint satisfaction
g = [
#Six Equality Constraints - Independent of the order of the solution
u[0]-problem['u0'],
v[0]-problem['v0'],
u[-1]-problem['uf'],
v[-1]-problem['vf'],
mu[0] - problem['mu0'],
udot[-1]-problem['udotf']] #Ensures a vertical landing
#Inequality Constraints on thrust magnitude - Dependent on order of solution
g.extend(mudot - problem['mudotmax'])
g.extend(problem['mudotmin'] - mudot)
g.extend(T-problem['Tmax'])
g.extend(problem['Tmin']-T)
fail = 0
return -m[-1], g, fail
def Opt():
problem = {}
# Problem Solution Info #
    order = 5  # Order should be kept relatively low (<= 6); if more accuracy is required, increase the number of partitions
N = order-1
problem['N'] = N
problem['nConstraint'] = 10
problem['nDivisions'] = 1 # number of segments, each fitted with its own polynomial of the specified order
# Problem Info #
isp = 290
problem['x0'] = -3200
problem['xf'] = 0
problem['y0'] = 2000
problem['yf'] = 0
problem['u0'] = 625
problem['uf'] = 0
problem['v0'] = -270
problem['vf'] = 0
problem['udotf'] = 0
problem['m0'] = 8500
problem['ve'] = isp*9.81
thrust = 600000
problem['Tmax'] = thrust
problem['Tmin'] = thrust*0.1 # 10% throttle
V0 = (problem['u0']**2 + problem['v0']**2)**0.5
fpa0 = np.arcsin(problem['v0']/V0)
problem['mu0'] = np.pi+fpa0
problem['mudotmax'] = 40*np.pi/180 # 40 deg/s
problem['mudotmin'] = -problem['mudotmax']
# Initial Guess
tf = 12
x = np.linspace(problem['x0'],problem['xf'],order+1)
y = np.linspace(problem['y0'],problem['yf'],order+1)
# tau = -cos(pi*np.arange(0,N+2)/(N+1))
# t = (tau+1)*0.5*tf
# x = interp1d(np.linspace(0,tf,order+1),x)(t)
# y = interp1d(np.linspace(0,tf,order+1),y)(t)
c0 = np.hstack((x[1:-1],y[1:-1],tf))
# Form D
problem['D'] = ChebyshevDiff(order)
opt = pyOpt.Optimization('Flat Pseudospectral PDG',lambda c: Cost(c,problem))
# Add the design variables
for i,xi in enumerate(x[1:-1]):
opt.addVar('x{}'.format(i+1), 'c', lower = problem['x0'], upper = problem['xf'], value = xi)
for i,xi in enumerate(y[1:-1]):
opt.addVar('y{}'.format(i+1), 'c', lower = problem['yf'], upper = problem['y0'], value = xi)
opt.addVar('tf','c', lower = 5, upper = 50, value=tf)
# Add the objective and constraints
opt.addObj('J')
for i in range(1,7):
opt.addCon('g{}'.format(i),'e')
for i in range(1,4*problem['nConstraint'] + 0*order):
opt.addCon('h{}'.format(i),'i')
# optimizer = pyOpt.COBYLA()
# optimizer = pyOpt.ALPSO()
optimizer = pyOpt.ALGENCAN()
# optimizer = pyOpt.SLSQP()
# optimizer = pyOpt.SDPEN()
# optimizer = pyOpt.PSQP()
# optimizer = pyOpt.SOLVOPT()
    sens_type = 'CS'  # Differencing type; options: ['FD', 'CS']
# optimizer.setOption('MAXIT',100) #SLSQP option
# optimizer.setOption('MIT',200) # PSQP
# fopt,copt,info = optimizer(opt,sens_type=sens_type)
fopt,copt,info = optimizer(opt)
    print(info)
    print(opt.solution(0))
t,x,y,u,v,udot,vdot,m,T,mu,mudot = Parse(copt,problem)
plt.figure()
plt.plot(x,y)
plt.title('Positions')
plt.figure()
plt.plot(u,v)
plt.title('Velocities')
plt.figure()
plt.plot(udot,vdot)
plt.title('Accelerations')
plt.figure()
plt.plot(t,m)
plt.title('Mass')
plt.figure()
plt.plot(t,mu*180/np.pi)
plt.title('Thrust Angle')
plt.figure()
plt.plot(t,T)
plt.title('Thrust')
plt.figure()
plt.plot(t,x)
plt.figure()
plt.plot(t,y)
plt.show()
def Parse(c,problem):
g = 3.7
ve = problem['ve']
D = problem['D']
N = problem['N']
x = np.hstack((problem['x0'], c[0:N], problem['xf']))
y = np.hstack((problem['y0'], c[N:N*2], problem['yf']))
tf = c[-1]
D = D*2/tf
tau = -cos(pi*np.arange(0,N+2)/(N+1))
t = (tau+1)*0.5*tf
u = np.dot(D,x)
v = np.dot(D,y)
udot = np.dot(D,u)
vdot = np.dot(D,v)
mu = np.arctan2(vdot+g,udot)
mudot = np.dot(D,mu)
r = -(g+vdot)/(ve*sin(mu))
m = problem['m0']*np.exp(cumtrapz(r,t,initial=0))
T = m*(udot)/(cos(mu))
tinterp = np.linspace(0,tf)
X = interp1d(t,np.vstack((x,y,u,v,udot,vdot,m,T,mu,mudot)).T,'cubic',axis=0)(tinterp)
x,y,u,v,udot,vdot,m,T,mu,mudot = X[:,0], X[:,1], X[:,2], X[:,3], X[:,4], X[:,5], X[:,6], X[:,7], X[:,8],X[:,9]
return tinterp,x,y,u,v,udot,vdot,m,T,mu,mudot
if __name__ == '__main__':
Opt()
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/tangible/veteran_reward/shared_emote_group_1.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/veteran_reward/shared_emote_group_1.iff"
result.attribute_template_id = -1
result.stfName("item_n","veteran_emotes_1")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
gades/novacoin
|
refs/heads/master
|
contrib/seeds/makeseeds.py
|
37
|
#!/usr/bin/env python
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):7777")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__main__':
main()
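# Packing sketch: each dotted quad is stored little-endian, e.g. "1.2.3.4"
# becomes 1 + (2 << 8) + (3 << 16) + (4 << 24) == 0x04030201, which is what
# the "0x%08x" formatting above prints into the seed table.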
|
drkitty/cyder
|
refs/heads/master
|
cyder/cydns/tests/__init__.py
|
12133432
| |
ngageoint/voxel-globe
|
refs/heads/nga_p2_release
|
voxel_globe/tiepoint_registration/__init__.py
|
12133432
| |
praekeltfoundation/ndoh-hub
|
refs/heads/develop
|
scripts/migrate_to_rapidpro/__init__.py
|
12133432
| |
lundjordan/services
|
refs/heads/master
|
lib/backend_common/backend_common/api.py
|
1
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pathlib
import connexion
import flask
import werkzeug
import cli_common.log
logger = cli_common.log.get_logger(__name__)
def common_error_handler(exception):
    '''Render any exception as a Connexion problem response.
    :param exception: the exception raised while handling the request
    :type exception: Exception
    :rtype: a Connexion problem response
    '''
if not isinstance(exception, werkzeug.exceptions.HTTPException):
exception = werkzeug.exceptions.InternalServerError()
return connexion.problem(
title=exception.name,
detail=exception.description,
status=exception.code,
)
class Api:
    '''Wrapper around a Flask application that installs the Connexion JSON
    encoder, registers a common error handler for every HTTP error code and
    exposes `register` for mounting Swagger-defined APIs.
    '''
def __init__(self, app):
        '''Configure the wrapped Flask application.
        :param app: the Flask application to wrap
        '''
self.__app = app
logger.debug('Setting JSON encoder.')
app.json_encoder = connexion.apps.flask_app.FlaskJSONEncoder
logger.debug('Setting common error handler for all error codes.')
for error_code in werkzeug.exceptions.default_exceptions:
app.register_error_handler(error_code, common_error_handler)
def register(self,
swagger_file,
base_path=None,
arguments=None,
auth_all_paths=False,
swagger_json=True,
swagger_ui=True,
swagger_path=None,
swagger_url='docs',
validate_responses=True,
strict_validation=True,
resolver=connexion.resolver.Resolver(),
):
'''Adds an API to the application based on a swagger file
:param swagger_file: swagger file with the specification
:type swagger_file: str
:param base_path: base path where to add this api
:type base_path: str | None
:param arguments: api version specific arguments to replace on the
specification
:type arguments: dict | None
:param auth_all_paths: whether to authenticate not defined paths
:type auth_all_paths: bool
:param swagger_json: whether to include swagger json or not
:type swagger_json: bool
:param swagger_ui: whether to include swagger ui or not
:type swagger_ui: bool
:param swagger_path: path to swagger-ui directory
:type swagger_path: string | None
:param swagger_url: URL to access swagger-ui documentation
:type swagger_url: string | None
:param validate_responses: True enables validation. Validation errors
generate HTTP 500 responses.
:type validate_responses: bool
:param strict_validation: True enables validation on invalid request
parameters
:type strict_validation: bool
:param resolver: Operation resolver.
:type resolver: connexion.resolver.Resolver | types.FunctionType
:rtype: None
'''
app = self.__app
if hasattr(resolver, '__call__'):
resolver = connexion.resolver.Resolver(resolver)
logger.debug('Adding API: %s', swagger_file)
self.swagger_url = swagger_url
self.__api = connexion.apis.flask_api.FlaskApi(
specification=pathlib.Path(swagger_file),
base_path=base_path,
arguments=arguments,
swagger_json=swagger_json,
swagger_ui=swagger_ui,
swagger_path=swagger_path,
swagger_url=swagger_url,
resolver=resolver,
validate_responses=validate_responses,
strict_validation=strict_validation,
auth_all_paths=auth_all_paths,
debug=app.debug,
)
app.register_blueprint(self.__api.blueprint)
for code, exception in werkzeug.exceptions.default_exceptions.items():
app.register_error_handler(exception, handle_default_exceptions)
return self.__api
def handle_default_exceptions_raw(e):
code = getattr(e, 'code', 500)
description = getattr(e, 'description', str(e))
return {
'type': 'about:blank',
'title': str(e),
'status': code,
'detail': description,
'instance': 'about:blank',
}
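# Illustrative output (assuming werkzeug's stock message text): for
# werkzeug.exceptions.NotFound() this yields roughly
# {'type': 'about:blank', 'title': '404 Not Found: ...', 'status': 404,
#  'detail': 'The requested URL was not found on the server. ...',
#  'instance': 'about:blank'}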
def handle_default_exceptions(e):
error = handle_default_exceptions_raw(e)
return flask.jsonify(error), error['status']
def init_app(app):
return Api(app)
def app_heartbeat():
pass
|
devttys1/linux-fslc
|
refs/heads/patches-4.0
|
scripts/gdb/linux/__init__.py
|
2010
|
# nothing to do for the initialization of this package
|
paulsmith/geodjango
|
refs/heads/master
|
django/template/loader.py
|
28
|
# Wrapper for loading templates from storage of some sort (e.g. filesystem, database).
#
# This uses the TEMPLATE_LOADERS setting, which is a list of loaders to use.
# Each loader is expected to have this interface:
#
# callable(name, dirs=[])
#
# name is the template name.
# dirs is an optional list of directories to search instead of TEMPLATE_DIRS.
#
# The loader should return a tuple of (template_source, path). The path returned
# might be shown to the user for debugging purposes, so it should identify where
# the template was loaded from.
#
# Each loader should have an "is_usable" attribute set. This is a boolean that
# specifies whether the loader can be used in this Python installation. Each
# loader is responsible for setting this when it's initialized.
#
# For example, the eggs loader (which is capable of loading templates from
# Python eggs) sets is_usable to False if the "pkg_resources" module isn't
# installed, because pkg_resources is necessary to read eggs.
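#
# A minimal illustrative loader satisfying this interface (hypothetical, for
# documentation only) would be:
#
#     def load_template_source(template_name, template_dirs=None):
#         raise TemplateDoesNotExist(template_name)  # never finds anything
#     load_template_source.is_usable = True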
from django.core.exceptions import ImproperlyConfigured
from django.template import Origin, Template, Context, TemplateDoesNotExist, add_to_builtins
from django.conf import settings
template_source_loaders = None
class LoaderOrigin(Origin):
def __init__(self, display_name, loader, name, dirs):
super(LoaderOrigin, self).__init__(display_name)
self.loader, self.loadname, self.dirs = loader, name, dirs
def reload(self):
return self.loader(self.loadname, self.dirs)[0]
def make_origin(display_name, loader, name, dirs):
if settings.TEMPLATE_DEBUG:
return LoaderOrigin(display_name, loader, name, dirs)
else:
return None
def find_template_source(name, dirs=None):
# Calculate template_source_loaders the first time the function is executed
# because putting this logic in the module-level namespace may cause
# circular import errors. See Django ticket #1292.
global template_source_loaders
if template_source_loaders is None:
loaders = []
for path in settings.TEMPLATE_LOADERS:
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = __import__(module, globals(), locals(), [attr])
            except ImportError as e:
                raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
                raise ImproperlyConfigured('Module "%s" does not define a "%s" callable template source loader' % (module, attr))
if not func.is_usable:
import warnings
warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % path)
else:
loaders.append(func)
template_source_loaders = tuple(loaders)
for loader in template_source_loaders:
try:
source, display_name = loader(name, dirs)
return (source, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
    raise TemplateDoesNotExist(name)
def get_template(template_name):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
source, origin = find_template_source(template_name)
template = get_template_from_string(source, origin, template_name)
return template
def get_template_from_string(source, origin=None, name=None):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(source, origin, name)
def render_to_string(template_name, dictionary=None, context_instance=None):
"""
Loads the given template_name and renders it with the given dictionary as
context. The template_name may be a string to load a single template using
get_template, or it may be a tuple to use select_template to find one of
the templates in the list. Returns a string.
"""
dictionary = dictionary or {}
if isinstance(template_name, (list, tuple)):
t = select_template(template_name)
else:
t = get_template(template_name)
if context_instance:
context_instance.update(dictionary)
else:
context_instance = Context(dictionary)
return t.render(context_instance)
def select_template(template_name_list):
"Given a list of template names, returns the first that can be loaded."
for template_name in template_name_list:
try:
return get_template(template_name)
except TemplateDoesNotExist:
continue
# If we get here, none of the templates could be loaded
    raise TemplateDoesNotExist(', '.join(template_name_list))
add_to_builtins('django.template.loader_tags')
|
maxamillion/ansible-modules-extras
|
refs/heads/devel
|
network/f5/bigip_hostname.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_hostname
short_description: Manage the hostname of a BIG-IP.
description:
- Manage the hostname of a BIG-IP.
version_added: "2.3"
options:
hostname:
description:
- Hostname of the BIG-IP host.
required: true
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set the hostname of the BIG-IP
bigip_hostname:
hostname: "bigip.localhost.localdomain"
password: "admin"
server: "bigip.localhost.localdomain"
user: "admin"
delegate_to: localhost
'''
RETURN = '''
hostname:
description: The new hostname of the device
returned: changed
type: string
sample: "big-ip01.internal"
'''
try:
from f5.bigip.contexts import TransactionContextManager
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
class BigIpHostnameManager(object):
def __init__(self, *args, **kwargs):
self.changed_params = dict()
self.params = kwargs
self.api = None
def connect_to_bigip(self, **kwargs):
return ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def ensure_hostname_is_present(self):
self.changed_params['hostname'] = self.params['hostname']
if self.params['check_mode']:
return True
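        # Apply the update inside an iControl REST transaction so the hostname
        # change is committed atomically when the context manager exits.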
tx = self.api.tm.transactions.transaction
with TransactionContextManager(tx) as api:
r = api.tm.sys.global_settings.load()
r.update(hostname=self.params['hostname'])
if self.hostname_exists():
return True
else:
raise F5ModuleError("Failed to set the hostname")
def hostname_exists(self):
if self.params['hostname'] == self.current_hostname():
return True
else:
return False
def present(self):
if self.hostname_exists():
return False
else:
return self.ensure_hostname_is_present()
def current_hostname(self):
r = self.api.tm.sys.global_settings.load()
return r.hostname
def apply_changes(self):
result = dict()
changed = self.apply_to_running_config()
if changed:
self.save_running_config()
result.update(**self.changed_params)
result.update(dict(changed=changed))
return result
def apply_to_running_config(self):
try:
self.api = self.connect_to_bigip(**self.params)
return self.present()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
def save_running_config(self):
self.api.tm.sys.config.exec_cmd('save')
class BigIpHostnameModuleConfig(object):
def __init__(self):
self.argument_spec = dict()
self.meta_args = dict()
self.supports_check_mode = True
self.initialize_meta_args()
self.initialize_argument_spec()
def initialize_meta_args(self):
args = dict(
hostname=dict(required=True)
)
self.meta_args = args
def initialize_argument_spec(self):
self.argument_spec = f5_argument_spec()
self.argument_spec.update(self.meta_args)
def create(self):
return AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=self.supports_check_mode
)
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
config = BigIpHostnameModuleConfig()
module = config.create()
try:
obj = BigIpHostnameManager(
check_mode=module.check_mode, **module.params
)
result = obj.apply_changes()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
|
nomadcube/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_lena_ward_segmentation.py
|
271
|
"""
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
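# (Each output pixel is the sum of a 2x2 block, so intensities are scaled by
# 4; the clustering below only depends on relative intensities, so this is
# harmless.)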
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
|
ITDevLtd/MCVirt
|
refs/heads/master
|
source/mcvirt-daemon/usr/lib/python2.7/dist-packages/mcvirt/thread/auto_start_watchdog.py
|
1
|
# Copyright (c) 2016 - I.T. Dev Ltd
#
# This file is part of MCVirt.
#
# MCVirt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# MCVirt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MCVirt. If not, see <http://www.gnu.org/licenses/>
import Pyro4
from mcvirt.thread.repeat_timer import RepeatTimer
from mcvirt.constants import AutoStartStates
from mcvirt.rpc.expose_method import Expose
from mcvirt.argument_validator import ArgumentValidator
from mcvirt.auth.permissions import PERMISSIONS
from mcvirt.syslogger import Syslogger
class AutoStartWatchdog(RepeatTimer):
"""Object to perform regular checks to determine that VMs are running."""
@property
def interval(self):
"""Return the timer interval."""
return self.get_autostart_interval()
@Expose()
def get_autostart_interval(self):
"""Return the autostart interval for the node."""
return self.po__get_registered_object('mcvirt_config')().get_config()['autostart_interval']
@Expose(locking=True)
def set_autostart_interval(self, interval_time):
"""Update the autostart interval for the node."""
self.po__get_registered_object('auth').assert_permission(PERMISSIONS.MANAGE_NODE)
ArgumentValidator.validate_integer(interval_time)
interval_time = int(interval_time)
def update_config(config):
"""Update autostart interval in MCVirt config."""
config['autostart_interval'] = interval_time
self.po__get_registered_object('mcvirt_config')().update_config(update_config,
'Update autostart interval')
if self.po__is_cluster_master:
def remote_update(node):
"""Update autostart interval on remote node."""
autostart_watchdog = node.get_connection('autostart_watchdog')
autostart_watchdog.set_autostart_interval(interval_time)
cluster = self.po__get_registered_object('cluster')
cluster.run_remote_command(remote_update)
# If the timer has been set to 0, disable the timer
if interval_time == 0:
self.repeat = False
self.timer.cancel()
self.timer = None
else:
# Otherwise update the running timer
if self.timer is None:
self.repeat = True
self.repeat_run()
def initialise(self):
"""Perform the ON_BOOT autostart and start timer."""
Pyro4.current_context.INTERNAL_REQUEST = True
vm_factory = self.po__get_registered_object('virtual_machine_factory')
try:
vm_factory.autostart(AutoStartStates.ON_BOOT)
        except Exception as exc:
Syslogger.logger().error('Error during autostart ON_BOOT: %s' % str(exc))
Pyro4.current_context.INTERNAL_REQUEST = False
super(AutoStartWatchdog, self).initialise()
def run(self):
"""Perform ON_POLL autostart."""
Pyro4.current_context.INTERNAL_REQUEST = True
vm_factory = self.po__get_registered_object('virtual_machine_factory')
try:
vm_factory.autostart(AutoStartStates.ON_POLL)
        except Exception as exc:
Syslogger.logger().error('Error during autostart ON_POLL : %s' % str(exc))
Pyro4.current_context.INTERNAL_REQUEST = False
|
gandarez/wakatime
|
refs/heads/master
|
wakatime/packages/pygments_py3/pygments/lexers/templates.py
|
29
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.templates
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for various template engines' markup.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.html import HtmlLexer, XmlLexer
from pygments.lexers.javascript import JavascriptLexer, LassoLexer
from pygments.lexers.css import CssLexer
from pygments.lexers.php import PhpLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.perl import PerlLexer
from pygments.lexers.jvm import JavaLexer, TeaLangLexer
from pygments.lexers.data import YamlLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this, default, combined
from pygments.token import Error, Punctuation, Whitespace, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml
__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer',
'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer',
'TwigLexer', 'TwigHtmlLexer']
class ErbLexer(Lexer):
"""
Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
lexer.
Just highlights ruby code between the preprocessor directives, other data
is left untouched by the lexer.
All options are also forwarded to the `RubyLexer`.
"""
name = 'ERB'
aliases = ['erb']
mimetypes = ['application/x-ruby-templating']
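    # Splits the input on ERB delimiters: escaped literals (<%% / %%>), output,
    # comment and trim tags (<%=, <%#, <%-, <%), closing tags (-%> / %>), and
    # whole lines starting with a single %.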
_block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
def __init__(self, **options):
from pygments.lexers.ruby import RubyLexer
self.ruby_lexer = RubyLexer(**options)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
"""
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
"""
tokens = self._block_re.split(text)
tokens.reverse()
state = idx = 0
try:
while True:
# text
if state == 0:
val = tokens.pop()
yield idx, Other, val
idx += len(val)
state = 1
# block starts
elif state == 1:
tag = tokens.pop()
# literals
if tag in ('<%%', '%%>'):
yield idx, Other, tag
idx += 3
state = 0
# comment
elif tag == '<%#':
yield idx, Comment.Preproc, tag
val = tokens.pop()
yield idx + 3, Comment, val
idx += 3 + len(val)
state = 2
# blocks or output
elif tag in ('<%', '<%=', '<%-'):
yield idx, Comment.Preproc, tag
idx += len(tag)
data = tokens.pop()
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(data):
yield r_idx + idx, r_token, r_value
idx += len(data)
state = 2
elif tag in ('%>', '-%>'):
yield idx, Error, tag
idx += len(tag)
state = 0
# % raw ruby statements
else:
yield idx, Comment.Preproc, tag[0]
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
yield idx + 1 + r_idx, r_token, r_value
idx += len(tag)
state = 0
# block ends
elif state == 2:
tag = tokens.pop()
if tag not in ('%>', '-%>'):
yield idx, Other, tag
else:
yield idx, Comment.Preproc, tag
idx += len(tag)
state = 0
except IndexError:
return
def analyse_text(text):
if '<%' in text and '%>' in text:
return 0.4
class SmartyLexer(RegexLexer):
"""
Generic `Smarty <http://smarty.php.net/>`_ template lexer.
Just highlights smarty code between the preprocessor directives, other
data is left untouched by the lexer.
"""
name = 'Smarty'
aliases = ['smarty']
filenames = ['*.tpl']
mimetypes = ['application/x-smarty']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(r'[^{]+', Other),
(r'(\{)(\*.*?\*)(\})',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(\{php\})(.*?)(\{/php\})',
bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
Comment.Preproc)),
(r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
(r'\{', Comment.Preproc, 'smarty')
],
'smarty': [
(r'\s+', Text),
(r'\{', Comment.Preproc, '#push'),
(r'\}', Comment.Preproc, '#pop'),
(r'#[a-zA-Z_]\w*#', Name.Variable),
(r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
(r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
(r'(true|false|null)\b', Keyword.Constant),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_]\w*', Name.Attribute)
]
}
def analyse_text(text):
rv = 0.0
if re.search('\{if\s+.*?\}.*?\{/if\}', text):
rv += 0.15
if re.search('\{include\s+file=.*?\}', text):
rv += 0.15
if re.search('\{foreach\s+.*?\}.*?\{/foreach\}', text):
rv += 0.15
if re.search('\{\$.*?\}', text):
rv += 0.01
return rv
class VelocityLexer(RegexLexer):
"""
Generic `Velocity <http://velocity.apache.org/>`_ template lexer.
Just highlights velocity directives and variable references, other
data is left untouched by the lexer.
"""
name = 'Velocity'
aliases = ['velocity']
filenames = ['*.vm', '*.fhtml']
flags = re.MULTILINE | re.DOTALL
identifier = r'[a-zA-Z_]\w*'
tokens = {
'root': [
(r'[^{#$]+', Other),
(r'(#)(\*.*?\*)(#)',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(##)(.*?$)',
bygroups(Comment.Preproc, Comment)),
(r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
'directiveparams'),
(r'(#\{?)(' + identifier + r')(\}|\b)',
bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
(r'\$\{?', Punctuation, 'variable')
],
'variable': [
(identifier, Name.Variable),
(r'\(', Punctuation, 'funcparams'),
(r'(\.)(' + identifier + r')',
bygroups(Punctuation, Name.Variable), '#push'),
(r'\}', Punctuation, '#pop'),
default('#pop')
],
'directiveparams': [
(r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
Operator),
(r'\[', Operator, 'rangeoperator'),
(r'\b' + identifier + r'\b', Name.Function),
include('funcparams')
],
'rangeoperator': [
(r'\.\.', Operator),
include('funcparams'),
(r'\]', Operator, '#pop')
],
'funcparams': [
(r'\$\{?', Punctuation, 'variable'),
(r'\s+', Text),
(r',', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"\b[0-9]+\b", Number),
(r'(true|false|null)\b', Keyword.Constant),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text):
rv += 0.25
if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text):
rv += 0.15
if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
rv += 0.15
if re.search(r'\$\{?[a-zA-Z_]\w*(\([^)]*\))?'
r'(\.\w+(\([^)]*\))?)*\}?', text):
rv += 0.01
return rv
class VelocityHtmlLexer(DelegatingLexer):
"""
Subclass of the `VelocityLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Velocity'
aliases = ['html+velocity']
alias_filenames = ['*.html', '*.fhtml']
mimetypes = ['text/html+velocity']
def __init__(self, **options):
super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer,
**options)
class VelocityXmlLexer(DelegatingLexer):
"""
Subclass of the `VelocityLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Velocity'
aliases = ['xml+velocity']
alias_filenames = ['*.xml', '*.vm']
mimetypes = ['application/xml+velocity']
def __init__(self, **options):
super(VelocityXmlLexer, self).__init__(XmlLexer, VelocityLexer,
**options)
def analyse_text(text):
rv = VelocityLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class DjangoLexer(RegexLexer):
"""
Generic `django <http://www.djangoproject.com/documentation/templates/>`_
and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.
It just highlights django/jinja code between the preprocessor directives,
other data is left untouched by the lexer.
"""
name = 'Django/Jinja'
aliases = ['django', 'jinja']
mimetypes = ['application/x-django-templating', 'application/x-jinja']
flags = re.M | re.S
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{', Comment.Preproc, 'var'),
# jinja/django comments
(r'\{[*#].*?[*#]\}', Comment),
# django comments
(r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Comment, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# raw jinja blocks
(r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Text, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# filter blocks
(r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
'block'),
(r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword), 'block'),
(r'\{', Other)
],
'varnames': [
(r'(\|)(\s*)([a-zA-Z_]\w*)',
bygroups(Operator, Text, Name.Function)),
(r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
(r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
Keyword),
(r'(loop|block|super|forloop)\b', Name.Builtin),
(r'[a-zA-Z][\w-]*', Name.Variable),
(r'\.\w+', Name.Variable),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
'var': [
(r'\s+', Text),
(r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames')
],
'block': [
(r'\s+', Text),
(r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames'),
(r'.', Punctuation)
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'\{%\s*(block|extends)', text) is not None:
rv += 0.4
if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
rv += 0.1
if re.search(r'\{\{.*?\}\}', text) is not None:
rv += 0.1
return rv
class MyghtyLexer(RegexLexer):
"""
Generic `myghty templates`_ lexer. Code that isn't Myghty
markup is yielded as `Token.Other`.
.. versionadded:: 0.6
.. _myghty templates: http://www.myghty.org/
"""
name = 'Myghty'
aliases = ['myghty']
filenames = ['*.myt', 'autodelegate']
mimetypes = ['application/x-myghty']
tokens = {
'root': [
(r'\s+', Text),
(r'(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Text, Name.Function, Name.Tag,
using(this), Name.Tag)),
(r'(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Name.Function, Name.Tag,
using(PythonLexer), Name.Tag)),
(r'(<&[^|])(.*?)(,.*?)?(&>)',
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
(r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
(r'</&>', Name.Tag),
(r'(<%!?)(.*?)(%>)(?s)',
bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PythonLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(Other, Operator)),
]
}
class MyghtyHtmlLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `HtmlLexer`.
.. versionadded:: 0.6
"""
name = 'HTML+Myghty'
aliases = ['html+myghty']
mimetypes = ['text/html+myghty']
def __init__(self, **options):
super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
**options)
class MyghtyXmlLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `XmlLexer`.
.. versionadded:: 0.6
"""
name = 'XML+Myghty'
aliases = ['xml+myghty']
mimetypes = ['application/xml+myghty']
def __init__(self, **options):
super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
**options)
class MyghtyJavascriptLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `JavascriptLexer`.
.. versionadded:: 0.6
"""
name = 'JavaScript+Myghty'
aliases = ['js+myghty', 'javascript+myghty']
mimetypes = ['application/x-javascript+myghty',
'text/x-javascript+myghty',
                 'text/javascript+myghty']
def __init__(self, **options):
super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
MyghtyLexer, **options)
class MyghtyCssLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `CssLexer`.
.. versionadded:: 0.6
"""
name = 'CSS+Myghty'
aliases = ['css+myghty']
mimetypes = ['text/css+myghty']
def __init__(self, **options):
super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
**options)
class MasonLexer(RegexLexer):
"""
Generic `mason templates`_ lexer. Stolen from Myghty lexer. Code that isn't
Mason markup is HTML.
.. _mason templates: http://www.masonhq.com/
.. versionadded:: 1.4
"""
name = 'Mason'
aliases = ['mason']
filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
mimetypes = ['application/x-mason']
tokens = {
'root': [
(r'\s+', Text),
(r'(<%doc>)(.*?)(</%doc>)(?s)',
bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
(r'(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Text, Name.Function, Name.Tag,
using(this), Name.Tag)),
(r'(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Name.Function, Name.Tag,
using(PerlLexer), Name.Tag)),
(r'(<&[^|])(.*?)(,.*?)?(&>)(?s)',
bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
(r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
(r'</&>', Name.Tag),
(r'(<%!?)(.*?)(%>)(?s)',
bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PerlLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(using(HtmlLexer), Operator)),
]
}
def analyse_text(text):
rv = 0.0
if re.search('<&', text) is not None:
rv = 1.0
return rv
class MakoLexer(RegexLexer):
"""
Generic `mako templates`_ lexer. Code that isn't Mako
markup is yielded as `Token.Other`.
.. versionadded:: 0.7
.. _mako templates: http://www.makotemplates.org/
"""
name = 'Mako'
aliases = ['mako']
filenames = ['*.mao']
mimetypes = ['application/x-mako']
tokens = {
'root': [
(r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(%)([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
(r'(<%)([\w.:]+)',
bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w.:]+)(>)',
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
(r'(<%(?:!?))(.*?)(%>)(?s)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=%|\#\#) | # an eval or comment line
(?=\#\*) | # multiline comment
(?=</?%) | # a python block
# call start or end
(?=\$\{) | # a substitution
(?<=\n)(?=\s*%) |
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)
''', bygroups(Other, Operator)),
(r'\s+', Text),
],
'ondeftags': [
(r'<%', Comment.Preproc),
(r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
include('tag'),
],
'tag': [
(r'((?:\w+)\s*=)(\s*)(".*?")',
bygroups(Name.Attribute, Text, String)),
(r'/?\s*>', Comment.Preproc, '#pop'),
(r'\s+', Text),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class MakoHtmlLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `HtmlLexer`.
.. versionadded:: 0.7
"""
name = 'HTML+Mako'
aliases = ['html+mako']
mimetypes = ['text/html+mako']
def __init__(self, **options):
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
**options)
class MakoXmlLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `XmlLexer`.
.. versionadded:: 0.7
"""
name = 'XML+Mako'
aliases = ['xml+mako']
mimetypes = ['application/xml+mako']
def __init__(self, **options):
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
**options)
class MakoJavascriptLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `JavascriptLexer`.
.. versionadded:: 0.7
"""
name = 'JavaScript+Mako'
aliases = ['js+mako', 'javascript+mako']
mimetypes = ['application/x-javascript+mako',
'text/x-javascript+mako',
'text/javascript+mako']
def __init__(self, **options):
super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `CssLexer`.
.. versionadded:: 0.7
"""
name = 'CSS+Mako'
aliases = ['css+mako']
mimetypes = ['text/css+mako']
def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options)
# Genshi and Cheetah lexers courtesy of Matt Good.
class CheetahPythonLexer(Lexer):
"""
Lexer for handling Cheetah's special $ tokens in Python syntax.
"""
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
for pos, type_, value in pylexer.get_tokens_unprocessed(text):
if type_ == Token.Error and value == '$':
type_ = Comment.Preproc
yield pos, type_, value
class CheetahLexer(RegexLexer):
"""
Generic `cheetah templates`_ lexer. Code that isn't Cheetah
markup is yielded as `Token.Other`. This also works for
`spitfire templates`_ which use the same syntax.
.. _cheetah templates: http://www.cheetahtemplate.org/
.. _spitfire templates: http://code.google.com/p/spitfire/
"""
name = 'Cheetah'
aliases = ['cheetah', 'spitfire']
filenames = ['*.tmpl', '*.spt']
mimetypes = ['application/x-cheetah', 'application/x-spitfire']
tokens = {
'root': [
(r'(##[^\n]*)$',
(bygroups(Comment))),
(r'#[*](.|\n)*?[*]#', Comment),
(r'#end[^#\n]*(?:#|$)', Comment.Preproc),
(r'#slurp$', Comment.Preproc),
(r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
(bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc))),
# TODO support other Python syntax like $foo['bar']
(r'(\$)([a-zA-Z_][\w.]*\w)',
bygroups(Comment.Preproc, using(CheetahPythonLexer))),
(r'(\$\{!?)(.*?)(\})(?s)',
bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?=\#[#a-zA-Z]*) | # an eval comment
(?=\$[a-zA-Z_{]) | # a substitution
\Z # end of string
)
''', Other),
(r'\s+', Text),
],
}
class CheetahHtmlLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Cheetah'
aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
mimetypes = ['text/html+cheetah', 'text/html+spitfire']
def __init__(self, **options):
super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
**options)
class CheetahXmlLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Cheetah'
aliases = ['xml+cheetah', 'xml+spitfire']
mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
def __init__(self, **options):
super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
**options)
class CheetahJavascriptLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `JavascriptLexer`.
"""
name = 'JavaScript+Cheetah'
aliases = ['js+cheetah', 'javascript+cheetah',
'js+spitfire', 'javascript+spitfire']
mimetypes = ['application/x-javascript+cheetah',
'text/x-javascript+cheetah',
'text/javascript+cheetah',
'application/x-javascript+spitfire',
'text/x-javascript+spitfire',
'text/javascript+spitfire']
def __init__(self, **options):
super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
CheetahLexer, **options)
class GenshiTextLexer(RegexLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
templates.
"""
name = 'Genshi Text'
aliases = ['genshitext']
mimetypes = ['application/x-genshi-text', 'text/x-genshi']
tokens = {
'root': [
(r'[^#$\s]+', Other),
(r'^(\s*)(##.*)$', bygroups(Text, Comment)),
(r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
include('variable'),
(r'[#$\s]', Other),
],
'directive': [
(r'\n', Text, '#pop'),
(r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
(r'(choose|when|with)([^\S\n]+)(.*)',
bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
(r'(choose|otherwise)\b', Keyword, '#pop'),
(r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
Name.Variable),
]
}
class GenshiMarkupLexer(RegexLexer):
"""
Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
`GenshiLexer`.
"""
flags = re.DOTALL
tokens = {
'root': [
(r'[^<$]+', Other),
(r'(<\?python)(.*?)(\?>)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
# yield style and script blocks as Other
(r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
(r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
(r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
include('variable'),
(r'[<$]', Other),
],
'pytag': [
(r'\s+', Text),
(r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'pyattr': [
('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
(r'[^\s>]+', String, '#pop'),
],
'tag': [
(r'\s+', Text),
(r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
(r'[\w:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('"', String, 'attr-dstring'),
("'", String, 'attr-sstring'),
(r'[^\s>]*', String, '#pop')
],
'attr-dstring': [
('"', String, '#pop'),
include('strings'),
("'", String)
],
'attr-sstring': [
("'", String, '#pop'),
include('strings'),
("'", String)
],
'strings': [
('[^"\'$]+', String),
include('variable')
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
Name.Variable),
]
}
class HtmlGenshiLexer(DelegatingLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ HTML templates.
"""
name = 'HTML+Genshi'
aliases = ['html+genshi', 'html+kid']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+genshi']
def __init__(self, **options):
super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
**options)
def analyse_text(text):
rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
rv += 0.2
if re.search('py:(.*?)=["\']', text) is not None:
rv += 0.2
return rv + HtmlLexer.analyse_text(text) - 0.01
class GenshiLexer(DelegatingLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
`kid <http://kid-templating.org/>`_ kid XML templates.
"""
name = 'Genshi'
aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
filenames = ['*.kid']
alias_filenames = ['*.xml']
mimetypes = ['application/x-genshi', 'application/x-kid']
def __init__(self, **options):
super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
**options)
def analyse_text(text):
rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
rv += 0.2
if re.search('py:(.*?)=["\']', text) is not None:
rv += 0.2
return rv + XmlLexer.analyse_text(text) - 0.01
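# Editorial aside (not upstream code): these analyse_text hooks feed
# pygments.lexers.guess_lexer().  A document containing both a ${...}
# substitution and a py: attribute collects +0.4 here on top of the base
# XmlLexer score, so guess_lexer() is expected to prefer a Genshi lexer for
# e.g. '<div xmlns:py="http://genshi.edgewall.org/" py:if="x">${x}</div>'.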
class JavascriptGenshiLexer(DelegatingLexer):
"""
A lexer that highlights javascript code in genshi text templates.
"""
name = 'JavaScript+Genshi Text'
aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
'javascript+genshi']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+genshi',
'text/x-javascript+genshi',
'text/javascript+genshi']
def __init__(self, **options):
super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
GenshiTextLexer,
**options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
class CssGenshiLexer(DelegatingLexer):
"""
A lexer that highlights CSS definitions in genshi text templates.
"""
name = 'CSS+Genshi Text'
aliases = ['css+genshitext', 'css+genshi']
alias_filenames = ['*.css']
mimetypes = ['text/css+genshi']
def __init__(self, **options):
super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
**options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
class RhtmlLexer(DelegatingLexer):
"""
Subclass of the ERB lexer that highlights the unlexed data with the
html lexer.
Nested Javascript and CSS is highlighted too.
"""
name = 'RHTML'
aliases = ['rhtml', 'html+erb', 'html+ruby']
filenames = ['*.rhtml']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+ruby']
def __init__(self, **options):
super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
# one more than the XmlErbLexer returns
rv += 0.5
return rv
class XmlErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights data outside preprocessor
directives with the `XmlLexer`.
"""
name = 'XML+Ruby'
aliases = ['xml+erb', 'xml+ruby']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+ruby']
def __init__(self, **options):
super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
"""
name = 'CSS+Ruby'
aliases = ['css+erb', 'css+ruby']
alias_filenames = ['*.css']
mimetypes = ['text/css+ruby']
def __init__(self, **options):
super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)
def analyse_text(text):
return ErbLexer.analyse_text(text) - 0.05
class JavascriptErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Ruby'
aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+ruby',
'text/x-javascript+ruby',
'text/javascript+ruby']
def __init__(self, **options):
super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
**options)
def analyse_text(text):
return ErbLexer.analyse_text(text) - 0.05
class HtmlPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.
Nested Javascript and CSS is highlighted too.
"""
name = 'HTML+PHP'
aliases = ['html+php']
filenames = ['*.phtml']
alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
'*.php[345]']
mimetypes = ['application/x-php',
'application/x-httpd-php', 'application/x-httpd-php3',
'application/x-httpd-php4', 'application/x-httpd-php5']
def __init__(self, **options):
super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
def analyse_text(text):
rv = PhpLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlPhpLexer(DelegatingLexer):
"""
    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
"""
name = 'XML+PHP'
aliases = ['xml+php']
alias_filenames = ['*.xml', '*.php', '*.php[345]']
mimetypes = ['application/xml+php']
def __init__(self, **options):
super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)
def analyse_text(text):
rv = PhpLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
"""
name = 'CSS+PHP'
aliases = ['css+php']
alias_filenames = ['*.css']
mimetypes = ['text/css+php']
def __init__(self, **options):
super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)
def analyse_text(text):
return PhpLexer.analyse_text(text) - 0.05
class JavascriptPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` which highlights unmatched data with the
`JavascriptLexer`.
"""
name = 'JavaScript+PHP'
aliases = ['js+php', 'javascript+php']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+php',
'text/x-javascript+php',
'text/javascript+php']
def __init__(self, **options):
super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
**options)
def analyse_text(text):
return PhpLexer.analyse_text(text)
class HtmlSmartyLexer(DelegatingLexer):
"""
    Subclass of the `SmartyLexer` that highlights unlexed data with the
`HtmlLexer`.
Nested Javascript and CSS is highlighted too.
"""
name = 'HTML+Smarty'
aliases = ['html+smarty']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
mimetypes = ['text/html+smarty']
def __init__(self, **options):
super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Smarty'
aliases = ['xml+smarty']
alias_filenames = ['*.xml', '*.tpl']
mimetypes = ['application/xml+smarty']
def __init__(self, **options):
super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Smarty'
aliases = ['css+smarty']
alias_filenames = ['*.css', '*.tpl']
mimetypes = ['text/css+smarty']
def __init__(self, **options):
super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)
def analyse_text(text):
return SmartyLexer.analyse_text(text) - 0.05
class JavascriptSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Smarty'
aliases = ['js+smarty', 'javascript+smarty']
alias_filenames = ['*.js', '*.tpl']
mimetypes = ['application/x-javascript+smarty',
'text/x-javascript+smarty',
'text/javascript+smarty']
def __init__(self, **options):
super(JavascriptSmartyLexer, self).__init__(JavascriptLexer, SmartyLexer,
**options)
def analyse_text(text):
return SmartyLexer.analyse_text(text) - 0.05
class HtmlDjangoLexer(DelegatingLexer):
"""
    Subclass of the `DjangoLexer` that highlights unlexed data with the
`HtmlLexer`.
Nested Javascript and CSS is highlighted too.
"""
name = 'HTML+Django/Jinja'
aliases = ['html+django', 'html+jinja', 'htmldjango']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+django', 'text/html+jinja']
def __init__(self, **options):
super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Django/Jinja'
aliases = ['xml+django', 'xml+jinja']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+django', 'application/xml+jinja']
def __init__(self, **options):
super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Django/Jinja'
aliases = ['css+django', 'css+jinja']
alias_filenames = ['*.css']
mimetypes = ['text/css+django', 'text/css+jinja']
def __init__(self, **options):
super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
class JavascriptDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Django/Jinja'
aliases = ['js+django', 'javascript+django',
'js+jinja', 'javascript+jinja']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+django',
'application/x-javascript+jinja',
'text/x-javascript+django',
'text/x-javascript+jinja',
'text/javascript+django',
'text/javascript+jinja']
def __init__(self, **options):
super(JavascriptDjangoLexer, self).__init__(JavascriptLexer, DjangoLexer,
**options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
class JspRootLexer(RegexLexer):
"""
Base for the `JspLexer`. Yields `Token.Other` for area outside of
JSP tags.
.. versionadded:: 0.7
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
# FIXME: I want to make these keywords but still parse attributes.
(r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
Keyword),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
],
}
class JspLexer(DelegatingLexer):
"""
Lexer for Java Server Pages.
.. versionadded:: 0.7
"""
name = 'Java Server Page'
aliases = ['jsp']
filenames = ['*.jsp']
mimetypes = ['application/x-jsp']
def __init__(self, **options):
super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)
def analyse_text(text):
rv = JavaLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class EvoqueLexer(RegexLexer):
"""
For files using the Evoque templating system.
.. versionadded:: 1.1
"""
name = 'Evoque'
aliases = ['evoque']
filenames = ['*.evoque']
mimetypes = ['application/x-evoque']
flags = re.DOTALL
tokens = {
'root': [
(r'[^#$]+', Other),
(r'#\[', Comment.Multiline, 'comment'),
(r'\$\$', Other),
# svn keywords
(r'\$\w+:[^$\n]*\$', Comment.Multiline),
# directives: begin, end
(r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, Punctuation)),
# directives: evoque, overlay
# see doc for handling first name arg: /directives/evoque/
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
# should be using(PythonLexer), not passed out as String
(r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
r'(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, using(PythonLexer), Punctuation)),
# directives: if, for, prefer, test
(r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
using(PythonLexer), Punctuation)),
# directive clauses (no {} expression)
(r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
# expressions
(r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
bygroups(Punctuation, None, using(PythonLexer),
Name.Builtin, None, None, Punctuation)),
(r'#', Other),
],
'comment': [
(r'[^\]#]', Comment.Multiline),
(r'#\[', Comment.Multiline, '#push'),
(r'\]#', Comment.Multiline, '#pop'),
(r'[\]#]', Comment.Multiline)
],
}
class EvoqueHtmlLexer(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 1.1
"""
name = 'HTML+Evoque'
aliases = ['html+evoque']
filenames = ['*.html']
mimetypes = ['text/html+evoque']
def __init__(self, **options):
super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
**options)
class EvoqueXmlLexer(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`XmlLexer`.
.. versionadded:: 1.1
"""
name = 'XML+Evoque'
aliases = ['xml+evoque']
filenames = ['*.xml']
mimetypes = ['application/xml+evoque']
def __init__(self, **options):
super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
**options)
class ColdfusionLexer(RegexLexer):
"""
Coldfusion statements
"""
name = 'cfstatement'
aliases = ['cfs']
filenames = []
mimetypes = []
flags = re.IGNORECASE
tokens = {
'root': [
(r'//.*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'\+\+|--', Operator),
(r'[-+*/^&=!]', Operator),
(r'<=|>=|<|>|==', Operator),
(r'mod\b', Operator),
(r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
(r'\|\||&&', Operator),
(r'\?', Operator),
(r'"', String.Double, 'string'),
# There is a special rule for allowing html in single quoted
# strings, evidently.
(r"'.*?'", String.Single),
(r'\d+', Number),
(r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
r'component|date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'(application|session|client|cookie|super|this|variables|arguments)\b',
Name.Constant),
(r'([a-z_$][\w.]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-z_$][\w.]*', Name.Variable),
(r'[()\[\]{};:,.\\]', Punctuation),
(r'\s+', Text),
],
'string': [
(r'""', String.Double),
(r'#.+?#', String.Interp),
(r'[^"#]+', String.Double),
(r'#', String.Double),
(r'"', String.Double, '#pop'),
],
}
class ColdfusionMarkupLexer(RegexLexer):
"""
Coldfusion markup only
"""
name = 'Coldfusion'
aliases = ['cf']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'[^<]+', Other),
include('tags'),
(r'<[^<>]*', Other),
],
'tags': [
(r'<!---', Comment.Multiline, 'cfcomment'),
(r'(?s)<!--.*?-->', Comment),
(r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
(r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
# negative lookbehind is for strings with embedded >
(r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
r'mailpart|mail|header|content|zip|image|lock|argument|try|'
r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
],
'cfoutput': [
(r'[^#<]+', Other),
(r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
Punctuation)),
# (r'<cfoutput.*?>', Name.Builtin, '#push'),
(r'</cfoutput.*?>', Name.Builtin, '#pop'),
include('tags'),
(r'(?s)<[^<>]*', Other),
(r'#', Other),
],
'cfcomment': [
(r'<!---', Comment.Multiline, '#push'),
(r'--->', Comment.Multiline, '#pop'),
(r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
],
}
class ColdfusionHtmlLexer(DelegatingLexer):
"""
Coldfusion markup in html
"""
name = 'Coldfusion HTML'
aliases = ['cfm']
filenames = ['*.cfm', '*.cfml']
mimetypes = ['application/x-coldfusion']
def __init__(self, **options):
super(ColdfusionHtmlLexer, self).__init__(HtmlLexer, ColdfusionMarkupLexer,
**options)
class ColdfusionCFCLexer(DelegatingLexer):
"""
Coldfusion markup/script components
.. versionadded:: 2.0
"""
name = 'Coldfusion CFC'
aliases = ['cfc']
filenames = ['*.cfc']
mimetypes = []
def __init__(self, **options):
super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer,
**options)
class SspLexer(DelegatingLexer):
"""
Lexer for Scalate Server Pages.
.. versionadded:: 1.4
"""
name = 'Scalate Server Page'
aliases = ['ssp']
filenames = ['*.ssp']
mimetypes = ['application/x-ssp']
def __init__(self, **options):
super(SspLexer, self).__init__(XmlLexer, JspRootLexer, **options)
def analyse_text(text):
rv = 0.0
        if re.search(r'val \w+\s*:', text):
rv += 0.6
if looks_like_xml(text):
rv += 0.2
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class TeaTemplateRootLexer(RegexLexer):
"""
Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
code blocks.
.. versionadded:: 1.5
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
],
}
class TeaTemplateLexer(DelegatingLexer):
"""
Lexer for `Tea Templates <http://teatrove.org/>`_.
.. versionadded:: 1.5
"""
name = 'Tea'
aliases = ['tea']
filenames = ['*.tea']
mimetypes = ['text/x-tea']
def __init__(self, **options):
super(TeaTemplateLexer, self).__init__(XmlLexer,
TeaTemplateRootLexer, **options)
def analyse_text(text):
rv = TeaLangLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class LassoHtmlLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`HtmlLexer`.
Nested JavaScript and CSS is also highlighted.
.. versionadded:: 1.6
"""
name = 'HTML+Lasso'
aliases = ['html+lasso']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['text/html+lasso',
'application/x-httpd-lasso',
'application/x-httpd-lasso[89]']
def __init__(self, **options):
super(LassoHtmlLexer, self).__init__(HtmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if html_doctype_matches(text): # same as HTML lexer
rv += 0.5
return rv
class LassoXmlLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`XmlLexer`.
.. versionadded:: 1.6
"""
name = 'XML+Lasso'
aliases = ['xml+lasso']
alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['application/xml+lasso']
def __init__(self, **options):
super(LassoXmlLexer, self).__init__(XmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class LassoCssLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`CssLexer`.
.. versionadded:: 1.6
"""
name = 'CSS+Lasso'
aliases = ['css+lasso']
alias_filenames = ['*.css']
mimetypes = ['text/css+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super(LassoCssLexer, self).__init__(CssLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
if re.search(r'\w+:.+?;', text):
rv += 0.1
if 'padding:' in text:
rv += 0.1
return rv
class LassoJavascriptLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`JavascriptLexer`.
.. versionadded:: 1.6
"""
name = 'JavaScript+Lasso'
aliases = ['js+lasso', 'javascript+lasso']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+lasso',
'text/x-javascript+lasso',
'text/javascript+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super(LassoJavascriptLexer, self).__init__(JavascriptLexer, LassoLexer,
**options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
if 'function' in text:
rv += 0.2
return rv
class HandlebarsLexer(RegexLexer):
"""
    Generic `handlebars <http://handlebarsjs.com/>`_ template lexer.
Highlights only the Handlebars template tags (stuff between `{{` and `}}`).
Everything else is left for a delegating lexer.
.. versionadded:: 2.0
"""
name = "Handlebars"
aliases = ['handlebars']
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{!.*\}\}', Comment),
(r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'),
(r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'),
],
'tag': [
(r'\s+', Text),
(r'\}\}\}', Comment.Special, '#pop'),
(r'\}\}', Comment.Preproc, '#pop'),
# Handlebars
(r'([#/]*)(each|if|unless|else|with|log|in)', bygroups(Keyword,
Keyword)),
# General {{#block}}
(r'([#/])([\w-]+)', bygroups(Name.Function, Name.Function)),
# {{opt=something}}
(r'([\w-]+)(=)', bygroups(Name.Attribute, Operator)),
# borrowed from DjangoLexer
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z][\w-]*', Name.Variable),
(r'\.[\w-]+', Name.Variable),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
]
}
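    # Illustrative behaviour (editorial note): for '{{#each items}}x{{/each}}'
    # HandlebarsLexer().get_tokens() yields Comment.Preproc for the '{{'/'}}'
    # delimiters, Keyword for '#each' and '/each', Name.Variable for 'items',
    # and Other for the plain 'x', which a delegating subclass hands on to its
    # root lexer (see HandlebarsHtmlLexer below).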
class HandlebarsHtmlLexer(DelegatingLexer):
"""
Subclass of the `HandlebarsLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 2.0
"""
name = "HTML+Handlebars"
aliases = ["html+handlebars"]
filenames = ['*.handlebars', '*.hbs']
mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']
def __init__(self, **options):
super(HandlebarsHtmlLexer, self).__init__(HtmlLexer, HandlebarsLexer, **options)
class YamlJinjaLexer(DelegatingLexer):
"""
    Subclass of the `DjangoLexer` that highlights unlexed data with the
`YamlLexer`.
Commonly used in Saltstack salt states.
.. versionadded:: 2.0
"""
name = 'YAML+Jinja'
aliases = ['yaml+jinja', 'salt', 'sls']
filenames = ['*.sls']
mimetypes = ['text/x-yaml+jinja', 'text/x-sls']
def __init__(self, **options):
super(YamlJinjaLexer, self).__init__(YamlLexer, DjangoLexer, **options)
class LiquidLexer(RegexLexer):
"""
Lexer for `Liquid templates
<http://www.rubydoc.info/github/Shopify/liquid>`_.
.. versionadded:: 2.0
"""
name = 'liquid'
aliases = ['liquid']
filenames = ['*.liquid']
tokens = {
'root': [
(r'[^{]+', Text),
# tags and block tags
(r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'),
# output tags
(r'(\{\{)(\s*)([^\s}]+)',
bygroups(Punctuation, Whitespace, using(this, state = 'generic')),
'output'),
(r'\{', Text)
],
'tag-or-block': [
# builtin logic blocks
(r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'),
(r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace),
combined('end-of-block', 'whitespace', 'generic')),
(r'(else)(\s*)(%\})',
bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'),
# other builtin blocks
(r'(capture)(\s+)([^\s%]+)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, using(this, state = 'variable'),
Whitespace, Punctuation), '#pop'),
(r'(comment)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, Punctuation), 'comment'),
(r'(raw)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, Punctuation), 'raw'),
# end of block
(r'(end(case|unless|if))(\s*)(%\})',
bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'),
(r'(end([^\s%]+))(\s*)(%\})',
bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'),
# builtin tags (assign and include are handled together with usual tags)
(r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)',
bygroups(Name.Tag, Whitespace,
using(this, state='generic'), Punctuation, Whitespace),
'variable-tag-markup'),
# other tags or blocks
(r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup')
],
'output': [
include('whitespace'),
        (r'\}\}', Punctuation, '#pop'), # end of output
(r'\|', Punctuation, 'filters')
],
'filters': [
include('whitespace'),
(r'\}\}', Punctuation, ('#pop', '#pop')), # end of filters and output
(r'([^\s|:]+)(:?)(\s*)',
bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup')
],
'filter-markup': [
(r'\|', Punctuation, '#pop'),
include('end-of-tag'),
include('default-param-markup')
],
'condition': [
include('end-of-block'),
include('whitespace'),
(r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})',
bygroups(using(this, state = 'generic'), Whitespace, Operator,
Whitespace, using(this, state = 'generic'), Whitespace,
Punctuation)),
(r'\b!', Operator),
(r'\bnot\b', Operator.Word),
(r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)',
bygroups(using(this, state = 'generic'), Whitespace, Operator.Word,
Whitespace, using(this, state = 'generic'))),
include('generic'),
include('whitespace')
],
'generic-value': [
include('generic'),
include('end-at-whitespace')
],
'operator': [
(r'(\s*)((=|!|>|<)=?)(\s*)',
bygroups(Whitespace, Operator, None, Whitespace), '#pop'),
(r'(\s*)(\bcontains\b)(\s*)',
bygroups(Whitespace, Operator.Word, Whitespace), '#pop'),
],
'end-of-tag': [
(r'\}\}', Punctuation, '#pop')
],
'end-of-block': [
(r'%\}', Punctuation, ('#pop', '#pop'))
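            # editorial note: the ('#pop', '#pop') tuple pops two states at
            # once, e.g. 'condition' plus the 'tag-or-block' beneath it,
            # landing back in 'root'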
],
'end-at-whitespace': [
(r'\s+', Whitespace, '#pop')
],
# states for unknown markup
'param-markup': [
include('whitespace'),
# params with colons or equals
(r'([^\s=:]+)(\s*)(=|:)',
bygroups(Name.Attribute, Whitespace, Operator)),
# explicit variables
(r'(\{\{)(\s*)([^\s}])(\s*)(\}\})',
bygroups(Punctuation, Whitespace, using(this, state = 'variable'),
Whitespace, Punctuation)),
include('string'),
include('number'),
include('keyword'),
(r',', Punctuation)
],
'default-param-markup': [
include('param-markup'),
(r'.', Text) # fallback for switches / variables / un-quoted strings / ...
],
'variable-param-markup': [
include('param-markup'),
include('variable'),
(r'.', Text) # fallback
],
'tag-markup': [
(r'%\}', Punctuation, ('#pop', '#pop')), # end of tag
include('default-param-markup')
],
'variable-tag-markup': [
(r'%\}', Punctuation, ('#pop', '#pop')), # end of tag
include('variable-param-markup')
],
# states for different values types
'keyword': [
(r'\b(false|true)\b', Keyword.Constant)
],
'variable': [
(r'[a-zA-Z_]\w*', Name.Variable),
(r'(?<=\w)\.(?=\w)', Punctuation)
],
'string': [
(r"'[^']*'", String.Single),
(r'"[^"]*"', String.Double)
],
'number': [
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer)
],
'generic': [ # decides for variable, string, keyword or number
include('keyword'),
include('string'),
include('number'),
include('variable')
],
'whitespace': [
(r'[ \t]+', Whitespace)
],
# states for builtin blocks
'comment': [
(r'(\{%)(\s*)(endcomment)(\s*)(%\})',
bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
Punctuation), ('#pop', '#pop')),
(r'.', Comment)
],
'raw': [
(r'[^{]+', Text),
(r'(\{%)(\s*)(endraw)(\s*)(%\})',
bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
Punctuation), '#pop'),
(r'\{', Text)
],
}
class TwigLexer(RegexLexer):
"""
`Twig <http://twig.sensiolabs.org/>`_ template lexer.
It just highlights Twig code between the preprocessor directives,
other data is left untouched by the lexer.
.. versionadded:: 2.0
"""
name = 'Twig'
aliases = ['twig']
mimetypes = ['application/x-twig']
flags = re.M | re.S
# Note that a backslash is included in the following two patterns
# PHP uses a backslash as a namespace separator
_ident_char = r'[\\\w-]|[^\x00-\x7f]'
_ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
_ident_end = r'(?:' + _ident_char + ')*'
_ident_inner = _ident_begin + _ident_end
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{', Comment.Preproc, 'var'),
# twig comments
(r'\{\#.*?\#\}', Comment),
# raw twig blocks
(r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Other, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
(r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Other, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# filter blocks
(r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner,
bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
'tag'),
(r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword), 'tag'),
(r'\{', Other),
],
'varnames': [
(r'(\|)(\s*)(%s)' % _ident_inner,
bygroups(Operator, Text, Name.Function)),
(r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner,
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(?i)(true|false|none|null)\b', Keyword.Pseudo),
            (r'(in|not|and|b-and|or|b-or|b-xor|is|'
             r'if|elseif|else|import|'
             r'constant|defined|divisibleby|empty|even|iterable|odd|sameas|'
             r'matches|starts\s+with|ends\s+with)\b',
Keyword),
(r'(loop|block|parent)\b', Name.Builtin),
(_ident_inner, Name.Variable),
(r'\.' + _ident_inner, Name.Variable),
(r'\.[0-9]+', Number),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
'var': [
(r'\s+', Text),
(r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames')
],
'tag': [
(r'\s+', Text),
(r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames'),
(r'.', Punctuation),
],
}
class TwigHtmlLexer(DelegatingLexer):
"""
Subclass of the `TwigLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 2.0
"""
name = "HTML+Twig"
aliases = ["html+twig"]
filenames = ['*.twig']
mimetypes = ['text/html+twig']
def __init__(self, **options):
super(TwigHtmlLexer, self).__init__(HtmlLexer, TwigLexer, **options)
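# --- Usage sketch (editorial addition, not part of the upstream module) -----
# Any of the delegating lexers above plugs into the standard pygments API;
# only pygments itself is assumed to be installed.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    sample = '<p>Hello {{ user.name|title }}{% if new %}, you have mail{% endif %}.</p>'
    # TwigHtmlLexer tokenizes the {{ ... }} / {% ... %} spans itself and
    # delegates everything yielded as Other to HtmlLexer.
    print(highlight(sample, TwigHtmlLexer(), HtmlFormatter()))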
|
erikdejonge/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/npr.py
|
7
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
)
class NprIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?npr\.org/(?:sections/[^/]+/)?\d{4}/\d{2}/\d{2}/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.npr.org/sections/allsongs/2015/10/21/449974205/new-music-from-beach-house-chairlift-cmj-discoveries-and-more',
'info_dict': {
'id': '449974205',
'title': 'New Music From Beach House, Chairlift, CMJ Discoveries And More'
},
'playlist_count': 7,
}, {
'url': 'https://www.npr.org/sections/deceptivecadence/2015/10/09/446928052/music-from-the-shadows-ancient-armenian-hymns-and-piano-jazz',
'info_dict': {
'id': '446928052',
'title': "Songs We Love: Tigran Hamasyan, 'Your Mercy is Boundless'"
},
'playlist': [{
'md5': '12fa60cb2d3ed932f53609d4aeceabf1',
'info_dict': {
'id': '446929930',
'ext': 'mp3',
'title': 'Your Mercy is Boundless (Bazum en Qo gtutyunqd)',
'duration': 402,
},
}],
}, {
        # multimedia, not media title
'url': 'https://www.npr.org/2017/06/19/533198237/tigers-jaw-tiny-desk-concert',
'info_dict': {
'id': '533198237',
'title': 'Tigers Jaw: Tiny Desk Concert',
},
'playlist': [{
'md5': '12fa60cb2d3ed932f53609d4aeceabf1',
'info_dict': {
'id': '533201718',
'ext': 'mp4',
'title': 'Tigers Jaw: Tiny Desk Concert',
'duration': 402,
},
}],
'expected_warnings': ['Failed to download m3u8 information'],
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
story = self._download_json(
'http://api.npr.org/query', playlist_id, query={
'id': playlist_id,
'fields': 'audio,multimedia,title',
'format': 'json',
'apiKey': 'MDAzMzQ2MjAyMDEyMzk4MTU1MDg3ZmM3MQ010',
})['list']['story'][0]
playlist_title = story.get('title', {}).get('$text')
KNOWN_FORMATS = ('threegp', 'm3u8', 'smil', 'mp4', 'mp3')
quality = qualities(KNOWN_FORMATS)
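        # qualities() returns a ranking callable: quality(format_id) is the
        # index of format_id within KNOWN_FORMATS (higher is better), or -1
        # for ids it does not know.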
entries = []
for media in story.get('audio', []) + story.get('multimedia', []):
media_id = media['id']
formats = []
for format_id, formats_entry in media.get('format', {}).items():
if not formats_entry:
continue
if isinstance(formats_entry, list):
formats_entry = formats_entry[0]
format_url = formats_entry.get('$text')
if not format_url:
continue
if format_id in KNOWN_FORMATS:
if format_id == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, media_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
elif format_id == 'smil':
smil_formats = self._extract_smil_formats(
format_url, media_id, transform_source=lambda s: s.replace(
'rtmp://flash.npr.org/ondemand/', 'https://ondemand.npr.org/'))
self._check_formats(smil_formats, media_id)
formats.extend(smil_formats)
else:
formats.append({
'url': format_url,
'format_id': format_id,
'quality': quality(format_id),
})
self._sort_formats(formats)
entries.append({
'id': media_id,
'title': media.get('title', {}).get('$text') or playlist_title,
'thumbnail': media.get('altImageUrl', {}).get('$text'),
'duration': int_or_none(media.get('duration', {}).get('$text')),
'formats': formats,
})
return self.playlist_result(entries, playlist_id, playlist_title)
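# Illustrative invocation (editorial note, assumes youtube-dl is installed):
#   youtube-dl "https://www.npr.org/2017/06/19/533198237/tigers-jaw-tiny-desk-concert"
# _VALID_URL routes the page to NprIE and each playlist entry is downloaded.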
|
MattsFleaMarket/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/importlib/test/__init__.py
|
12133432
| |
femmerling/DirMaker
|
refs/heads/master
|
box/lib/python2.7/site-packages/migrate/versioning/templates/repository/__init__.py
|
12133432
| |
aviarypl/mozilla-l10n-addons-server
|
refs/heads/master
|
src/olympia/devhub/management/__init__.py
|
12133432
| |
wangxiangyu/horizon
|
refs/heads/stable/kilo
|
openstack_dashboard/dashboards/project/images/snapshots/__init__.py
|
12133432
| |
nevir/plexability
|
refs/heads/master
|
extern/depot_tools/third_party/logilab/__init__.py
|
12133432
| |
blockstack/packaging
|
refs/heads/master
|
imported/future/src/future/moves/tkinter/simpledialog.py
|
118
|
from __future__ import absolute_import
from future.utils import PY3
if PY3:
from tkinter.simpledialog import *
else:
try:
from SimpleDialog import *
except ImportError:
raise ImportError('The SimpleDialog module is missing. Does your Py2 '
'installation include tkinter?')
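# Illustrative use (editorial note): under Py3 this shim simply re-exports
# tkinter.simpledialog, so
#   from future.moves.tkinter.simpledialog import askstring
# resolves to tkinter.simpledialog.askstring.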
|
elvrsn/share-thoughts
|
refs/heads/master
|
iwas/urls.py
|
1
|
from django.conf.urls import patterns, include, url
#from django.contrib import admin
#admin.autodiscover()
import os
from views import *
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'iwas.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', home),
url(r'^share/$', toshare),
url(r'^about/$', about),
url(r'^feedback/$', feedback),
    url(r'^thanks/$', thanks, name="thanks_page"),
url(r'^feedback_thanks/$', feedback_thanks),
#url(r'^admin/', include(admin.site.urls)),
)
|
CristianCantoro/wikidump
|
refs/heads/master
|
wikidump/extractors/misc.py
|
1
|
"""Various extractors."""
import functools
import regex
import signal
import itertools
from more_itertools import peekable
from typing import (Callable, Iterable, Iterator, List, TypeVar, NamedTuple,
Optional)
from . import arxiv, doi, isbn, pubmed
from .common import CaptureResult, Span
from .. import timeout
# empty generator
# Python Empty Generator Function
# https://stackoverflow.com/a/13243870/2377454
def empty_generator():
yield from ()
class Section:
"""Section class."""
def __init__(self, name: str, level: int, body: str):
"""Instantiate a section."""
self.name = name
self.level = level
self.body = body
self._full_body = None
@property
def is_preamble(self):
"""Return True when this section is the preamble of the page."""
return self.level == 0
@property
def full_body(self) -> str:
"""Get the full body of the section."""
if self._full_body is not None:
return self._full_body
if self.is_preamble:
full_body = self.body
else:
            equals = '=' * self.level
full_body = '{equals}{name}{equals}\n{body}'.format(
equals=equals,
name=self.name,
body=self.body,
)
self._full_body = full_body
return full_body
def __repr__(self):
'Return a nicely formatted representation string'
template = '{class_name}(name={name!r}, level={level!r}, '\
'body={body!r})'
return template.format(
class_name=self.__class__.__name__,
name=self.name,
level=self.level,
body=self.body[:20],
)
section_header_re = regex.compile(
r'''^
(?P<equals>=+) # Match the equals, greedy
(?P<section_name> # <section_name>:
.+? # Text inside, non-greedy
)
(?P=equals)\s* # Re-match the equals
$
''', regex.VERBOSE | regex.MULTILINE)
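# For example, '== History ==' matches with equals='==' and
# section_name=' History ' (surrounding whitespace is kept); the section
# level used below is simply len(equals).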
templates_re = regex.compile(
r'''
\{\{
(?P<content>(?s).*?)
\}\}
''', regex.VERBOSE)
@functools.lru_cache(maxsize=1000)
def _pattern_or(words: List) -> str:
words_joined = '|'.join(words)
return r'(?:{})'.format(words_joined)
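# e.g. _pattern_or(('foo', 'bar')) -> '(?:foo|bar)'.  Because of the
# lru_cache decorator the argument must be hashable, so callers should pass
# a tuple rather than a list despite the List annotation.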
def references(source: str) -> Iterator[CaptureResult[str]]:
"""Return all the references found in the document."""
pattern = regex.compile(
r'''
<ref
.*?
<\/ref>
''', regex.VERBOSE | regex.IGNORECASE | regex.DOTALL)
for match in pattern.finditer(source):
yield CaptureResult(match.group(0), Span(*match.span()))
def sections(source: str, include_preamble: bool=False) \
-> Iterator[CaptureResult[Section]]:
"""Return the sections found in the document."""
section_header_matches = peekable(section_header_re.finditer(source))
if include_preamble:
try:
body_end = section_header_matches.peek().start()
body_end -= 1 # Don't include the newline before the next section
except StopIteration:
body_end = len(source)
preamble = Section(
name='',
level=0,
body=source[:body_end],
)
yield CaptureResult(preamble, Span(0, body_end))
for match in section_header_matches:
name = match.group('section_name')
level = len(match.group('equals'))
body_begin = match.end() + 1 # Don't include the newline after
try:
body_end = section_header_matches.peek().start()
body_end -= 1 # Don't include the newline before the next section
except StopIteration:
body_end = len(source)
section = Section(
name=name,
level=level,
body=source[body_begin:body_end],
)
yield CaptureResult(section, Span(match.start(), body_end))
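# Usage sketch (editorial): walking the sections of a page body.
#   for section, span in sections(text, include_preamble=True):
#       print(section.level, section.name.strip(), span)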
# @functools.lru_cache(maxsize=10)
# @utils.listify
# def citations(source, language):
# citation_synonyms = languages.citation[language]
# citation_synonyms_pattern = _pattern_or(citation_synonyms)
# pattern = regex.compile(
# r'''
# \{\{
# \s*
# %s
# \s+
# (?:(?s).*?)
# \}\}
# ''' % (citation_synonyms_pattern,), regex.VERBOSE
# )
# for match in pattern.finditer(source):
# yield match.group(0)
def templates(source: str) -> Iterator[CaptureResult[str]]:
"""Return all the templates found in the document."""
for match in templates_re.finditer(source):
yield CaptureResult(match.group(0), Span(*match.span()))
T = TypeVar('T')
Extractor = Callable[[str], T]
def pub_identifiers(source: str, extractors: Iterable[Extractor]=None) -> T:
"""Return all the identifiers found in the document."""
if extractors is None:
extractors = (
arxiv.extract,
doi.extract,
isbn.extract,
pubmed.extract,
)
for identifier_extractor in extractors:
for capture in identifier_extractor(source):
yield capture
class Wikilink:
"""Link class."""
def __init__(self,
link: str,
tosection: str,
anchor: str,
section_name: str,
section_level: int,
section_number: int):
"""Instantiate a link."""
self.link = link
self.tosection = tosection
self.anchor = anchor
self.section_name = section_name
self.section_level = section_level
self.section_number = section_number
def __repr__(self):
'Return a nicely formatted representation string'
template = '{class_name}(link={link!r}, anchor={anchor!r})'
return template.format(
class_name=self.__class__.__name__,
link=self.link,
anchor=self.anchor,
)
# See https://regex101.com/r/kF0yC9/15
#
# The group 'total' matches everything that is around the link, delimited by
# spaces, this is because the actual anchor text can be built prepending or
# appending text to the actual wikilink. So [[apple]]s will point to the page
# 'Apple', but it will be visualized as 'apples', including the "s".
#
# The group 'wikilink' matches the whole wikilink, including square brackets.
#
# The text inside the 'link' group is title of the page, it is limited to 256
# chars since it is the max supported by MediaWiki for page titles [1].
# Furthermore:
# * pipes and brackets (|,[,]) are invalid characters for page
# titles [2];
# * newlines are not allowed [3]
# * the pound sign (#) is not allowed in page titles, but links can point
# to sections and we want to capture that.
# The anchor text allows pipes and closed brackets, but not open ones [3],
# newlines are allowed [3].
# See:
# [1] https://en.wikipedia.org/w/index.php?title=Wikipedia:Wikipedia_records\
# &oldid=709472636#Article_with_longest_title
# [2] https://www.mediawiki.org/w/index.php?title=Manual:$wgLegalTitleChars\
# &oldid=1274292
# [3] https://it.wikipedia.org/w/index.php?\
# title=Utente:CristianCantoro/Sandbox&oldid=79784393#Test_regexp
REGEX_TIMEOUT = 5
wikilink_re = regex.compile(
r'''(?P<total> # named group <total>:
(?P<wikilink> # <wikilink>:
# Match the whole wikilink
# - including brackets
\[\[ # Match two opening brackets
(?P<link> # <link>:
[^\n\|\]\[\<\>\{\}]{0,256} # Text inside link group
# everything not illegal in page
# title except pound-sign,
# non-greedy
# can be empty or up to 256 chars
) #
(?: # Non-capturing group
\| # Match a pipe
(?P<anchor> # <anchor>:
                [^\[]*? # Text inside anchor group:
# match everything not an open
# bracket - non greedily
# if empty the anchor text is link
) #
)? # anchor text is optional
\]\] # Match two closing brackets
) # Close wikilink group
    \w* # Any additional alphanumeric
# character, non-alphanumeric
# chars limit the anchor
) # Close total
\s? # Match optional space
''', regex.VERBOSE | regex.MULTILINE)
# wikilink_simple_re = regex.compile(
# r'''\[\[ # Match two opening brackets
# [^\n\]\[]+?
# \]\] # Match two closing brackets
# ''', regex.VERBOSE | regex.MULTILINE)
# # match any non-alphanumeric character
# endanchor_re = regex.compile(r'\W')
# the regex module supports reverse search
# https://pypi.org/project/regex/
# space_rtl_re = regex.compile(r"(?r)\s")
SectionLimits = NamedTuple('SectionLimits', [
('name', str),
('level', int),
('number', int),
('begin', int),
    ('end', int)
])
def reverse_position(revpos: int, strlen: int) -> int:
if revpos == -1:
return -1
# the modulus of a negative number is positive, also this number goes
# from 0 to strlen-1 (same as the valid indexes of a string).
#
# This is the logic:
# original vector reversed vector
# |0 |1 |2 |3 |4 |5 | |0 |1 |2 |3 |4 |5 |
# |->|->|->|* |# |->| => |<-|# |* |<-|<-|<-|
#
# -(revpos+1) |-1|-2|-3|-4|-5|-6|
# -(revpos+1) % strlen |5 |4 |3 |2 |1 |0 |
#
# revpos = 1 => pos = 4
return -(revpos+1) % strlen
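# Worked example (editorial): with strlen=6 and revpos=1 (the '#' cell in the
# reversed vector above), -(1+1) % 6 == 4, its index in the original vector.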
def wikilinks(page_title: str,
source: str,
sections: Iterator[CaptureResult[Section]],
debug: Optional[bool]=False) \
-> Iterator[CaptureResult[Wikilink]]:
"""Return the wikilinks found in the document."""
wikilink_matches = empty_generator()
if debug:
try:
wikilink_matches = timeout.wrap_timeout(
lambda t: peekable(wikilink_re.finditer(t,concurrent=True)),
REGEX_TIMEOUT,
[source]
)
        except timeout.CallTimeout as exception:  # assumed: CallTimeout lives in the timeout helper module
import ipdb; ipdb.set_trace()
else:
wikilink_matches = peekable(
wikilink_re.finditer(source,concurrent=True)
)
sections_limits = [SectionLimits(name=section.name,
level=section.level,
number=idx,
begin=span.begin,
end=span.end)
for idx, (section, span) in enumerate(sections, 1)]
last_section_seen = 0
prevmatch_start = 0
prevmatch_end = 0
for match in wikilink_matches:
prevmatch_start = match.start()
prevmatch_end = match.end()
link = match.group('link') or ''
link = link.strip()
# split on '#' (link to section)
tosection = ''
if '#' in link:
splitlink = link.split('#', 1)
link = splitlink[0]
if not link:
link = page_title
tosection = splitlink[1]
anchor = match.group('anchor') or link
# newlines in anchor are visualized as spaces.
anchor = anchor.replace('\n', ' ')
anchor = ' '.join(anchor.strip().split())
total_start = match.start('total')
total_end = match.end('total')
link_section_number = 0
link_section_name = '---~--- incipit ---~---'
link_section_level = 0
for section in sections_limits[last_section_seen:]:
if section.begin <= total_start <= section.end:
link_section_number = section.number
link_section_name = section.name
link_section_level = section.level
last_section_seen = (link_section_number - 1)\
if link_section_number > 0 else 0
break
        # In the wikitext you may find broken links such as:
        # - [[ |yoda]]
        # - [[|deity|god]]
        # - [[]]
        # Note: * the first case never occurs, because the (software)
        #         editor would auto-correct it to [[yoda]]; it may have
        #         occurred in a past version of the software.
        #       * in the last case both 'link' and 'anchor' are empty.
        #
        # We consider these cases to be broken no matter what and we
        # ignore them.
if not link:
continue
        anchor_prefix = (source[match.start('total'):
                                match.start('wikilink')]
                         .strip('[')
                         )
        anchor_suffix = (source[match.end('wikilink'):
                                match.end('total')]
                         .strip(']')
                         )
        # Text glued to the brackets is rendered as part of the anchor,
        # e.g. [[apple]]s displays as 'apples', so fold it in before the
        # Wikilink object is built.
        anchor = anchor_prefix + anchor + anchor_suffix
        wikilink = Wikilink(
            link=link,
            anchor=anchor,
            tosection=tosection,
            section_name=link_section_name,
            section_level=link_section_level,
            section_number=link_section_number
        )
# print(source[total_start:total_end])
yield CaptureResult(wikilink, Span(total_start, total_end))
return
|
monikagrabowska/osf.io
|
refs/heads/develop
|
api_tests/comments/views/test_comment_report_detail.py
|
6
|
import mock
import pytest
from django.utils import timezone
from nose.tools import * # flake8: noqa
from datetime import datetime
from framework.guid.model import Guid
from api.base.settings.defaults import API_BASE
from api_tests import utils as test_utils
from tests.base import ApiTestCase
from osf_tests.factories import ProjectFactory, AuthUserFactory, CommentFactory
from addons.wiki.tests.factories import NodeWikiFactory
class ReportDetailViewMixin(object):
def setUp(self):
super(ReportDetailViewMixin, self).setUp()
self.user = AuthUserFactory()
self.contributor = AuthUserFactory()
self.non_contributor = AuthUserFactory()
self.payload = {
'data': {
'id': self.user._id,
'type': 'comment_reports',
'attributes': {
'category': 'spam',
'message': 'Spam is delicious.'
}
}
}
def _set_up_private_project_comment_reports(self):
raise NotImplementedError
def _set_up_public_project_comment_reports(self):
raise NotImplementedError
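    # Editorial note: the two stubs above make this a template-method mixin;
    # each concrete TestCase at the bottom of the file supplies its own
    # comment target (node, file, or wiki page) while all the permission
    # tests below are shared unchanged.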
def test_private_node_reporting_contributor_can_view_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.user._id)
def test_private_node_reported_contributor_cannot_view_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.get(self.private_url, auth=self.contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_logged_in_non_contributor_cannot_view_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.get(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_logged_out_contributor_cannot_view_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_reporting_contributor_can_view_report_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.user._id)
def test_public_node_reported_contributor_cannot_view_report_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.get(self.public_url, auth=self.contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_public_node_logged_in_non_contributor_cannot_view_other_users_report_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.get(self.public_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_public_node_logged_out_contributor_cannot_view_report_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.get(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_logged_in_non_contributor_reporter_can_view_own_report_detail(self):
self._set_up_public_project_comment_reports()
self.public_comment.reports[self.non_contributor._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
self.public_comment.save()
url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.public_comment._id, self.non_contributor._id)
res = self.app.get(url, auth=self.non_contributor.auth)
assert_equal(res.status_code, 200)
def test_private_node_reporting_contributor_can_update_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.put_json_api(self.private_url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.user._id)
assert_equal(res.json['data']['attributes']['message'], self.payload['data']['attributes']['message'])
def test_private_node_reported_contributor_cannot_update_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.put_json_api(self.private_url, self.payload, auth=self.contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_logged_in_non_contributor_cannot_update_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.put_json_api(self.private_url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_logged_out_contributor_cannot_update_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.put_json_api(self.private_url, self.payload, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_reporting_contributor_can_update_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.put_json_api(self.public_url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.user._id)
assert_equal(res.json['data']['attributes']['message'], self.payload['data']['attributes']['message'])
def test_public_node_reported_contributor_cannot_update_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.put_json_api(self.public_url, self.payload, auth=self.contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_public_node_logged_in_non_contributor_cannot_update_other_users_report_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.put_json_api(self.public_url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_public_node_logged_out_contributor_cannot_update_report_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.put_json_api(self.public_url, self.payload, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_logged_in_non_contributor_reporter_can_update_own_report_detail(self):
self._set_up_public_project_comment_reports()
self.public_comment.reports[self.non_contributor._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
self.public_comment.save()
url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.public_comment._id, self.non_contributor._id)
payload = {
'data': {
'id': self.non_contributor._id,
'type': 'comment_reports',
'attributes': {
'category': 'spam',
'message': 'Spam is delicious.'
}
}
}
res = self.app.put_json_api(url, payload, auth=self.non_contributor.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['message'], payload['data']['attributes']['message'])
def test_private_node_reporting_contributor_can_delete_report_detail(self):
self._set_up_private_project_comment_reports()
comment = CommentFactory.build(node=self.private_project, user=self.contributor, target=self.comment.target)
comment.reports = {self.user._id: {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}}
comment.save()
url = '/{}comments/{}/reports/{}/'.format(API_BASE, comment._id, self.user._id)
res = self.app.delete_json_api(url, auth=self.user.auth)
assert_equal(res.status_code, 204)
def test_private_node_reported_contributor_cannot_delete_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.delete_json_api(self.private_url, auth=self.contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_logged_in_non_contributor_cannot_delete_report_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.delete_json_api(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_logged_out_contributor_cannot_delete_detail(self):
self._set_up_private_project_comment_reports()
res = self.app.delete_json_api(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_reporting_contributor_can_delete_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.delete_json_api(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 204)
def test_public_node_reported_contributor_cannot_delete_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.delete_json_api(self.public_url, auth=self.contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_public_node_logged_in_non_contributor_cannot_delete_other_users_report_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.delete_json_api(self.public_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_public_node_logged_out_contributor_cannot_delete_report_detail(self):
self._set_up_public_project_comment_reports()
res = self.app.delete_json_api(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_logged_in_non_contributor_reporter_can_delete_own_report_detail(self):
self._set_up_public_project_comment_reports()
self.public_comment.reports[self.non_contributor._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
self.public_comment.save()
url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.public_comment._id, self.non_contributor._id)
res = self.app.delete_json_api(url, auth=self.non_contributor.auth)
assert_equal(res.status_code, 204)
class TestReportDetailView(ReportDetailViewMixin, ApiTestCase):
def _set_up_private_project_comment_reports(self):
self.private_project = ProjectFactory.create(is_public=False, creator=self.user)
self.private_project.add_contributor(contributor=self.contributor, save=True)
self.comment = CommentFactory.build(node=self.private_project, user=self.contributor)
self.comment.reports = {self.user._id: {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}}
self.comment.save()
self.private_url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.comment._id, self.user._id)
def _set_up_public_project_comment_reports(self):
self.public_project = ProjectFactory.create(is_public=True, creator=self.user)
self.public_project.add_contributor(contributor=self.contributor, save=True)
self.public_comment = CommentFactory.build(node=self.public_project, user=self.contributor)
self.public_comment.reports = {self.user._id: {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}}
self.public_comment.save()
self.public_url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.public_comment._id, self.user._id)
class TestFileCommentReportDetailView(ReportDetailViewMixin, ApiTestCase):
def _set_up_private_project_comment_reports(self):
self.private_project = ProjectFactory.create(is_public=False, creator=self.user)
self.private_project.add_contributor(contributor=self.contributor, save=True)
self.file = test_utils.create_test_file(self.private_project, self.user)
self.comment = CommentFactory.build(node=self.private_project, target=self.file.get_guid(), user=self.contributor)
self.comment.reports = {self.user._id: {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}}
self.comment.save()
self.private_url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.comment._id, self.user._id)
def _set_up_public_project_comment_reports(self):
self.public_project = ProjectFactory.create(is_public=True, creator=self.user)
self.public_project.add_contributor(contributor=self.contributor, save=True)
self.public_file = test_utils.create_test_file(self.public_project, self.user)
self.public_comment = CommentFactory.build(node=self.public_project, target=self.public_file.get_guid(), user=self.contributor)
self.public_comment.reports = {self.user._id: {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}}
self.public_comment.save()
self.public_url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.public_comment._id, self.user._id)
class TestWikiCommentReportDetailView(ReportDetailViewMixin, ApiTestCase):
def _set_up_private_project_comment_reports(self):
self.private_project = ProjectFactory.create(is_public=False, creator=self.user)
self.private_project.add_contributor(contributor=self.contributor, save=True)
with mock.patch('osf.models.AbstractNode.update_search'):
self.wiki = NodeWikiFactory(node=self.private_project, user=self.user)
self.comment = CommentFactory.build(node=self.private_project, target=Guid.load(self.wiki._id), user=self.contributor)
self.comment.reports = {self.user._id: {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}}
self.comment.save()
self.private_url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.comment._id, self.user._id)
def _set_up_public_project_comment_reports(self):
self.public_project = ProjectFactory.create(is_public=True, creator=self.user)
self.public_project.add_contributor(contributor=self.contributor, save=True)
with mock.patch('osf.models.AbstractNode.update_search'):
self.public_wiki = NodeWikiFactory(node=self.public_project, user=self.user)
self.public_comment = CommentFactory.build(node=self.public_project, target=Guid.load(self.public_wiki._id), user=self.contributor)
self.public_comment.reports = {self.user._id: {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}}
self.public_comment.save()
self.public_url = '/{}comments/{}/reports/{}/'.format(API_BASE, self.public_comment._id, self.user._id)
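# The three TestCase subclasses above follow a template-method pattern:
# ReportDetailViewMixin owns every assertion, and each subclass supplies only
# the _set_up_private/_set_up_public hooks that build a comment against its
# target type.  A hedged sketch of extending this to a new target (all names
# hypothetical):
#
# class TestNewTargetReportDetailView(ReportDetailViewMixin, ApiTestCase):
#     def _set_up_private_project_comment_reports(self):
#         ...  # build a comment targeting the new GUID type, attach a report,
#              # then set self.comment / self.private_url as the mixin expects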
|
StudentCV/TableSoccerCV
|
refs/heads/master
|
GrabVideo.py
|
1
|
# coding: utf-8
#Copyright 2016 StudentCV
#Copyright and related rights are licensed under the
#Solderpad Hardware License, Version 0.51 (the “License”);
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at http://solderpad.org/licenses/SHL-0.51.
#Unless required by applicable law or agreed to in writing,
#software, hardware and materials distributed under this License
#is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
#either express or implied. See the License for the specific language
#governing permissions and limitations under the License.
# In[1]:
import numpy as np
import cv2
# In[5]:
cap = cv2.VideoCapture(r'.\Kicker Aufnahmen\acA800-510uc__21726923__20151202_114339102.avi')
# In[ ]:
# In[ ]:
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # end of stream or read failure: bail out instead of handing a
        # None frame to cvtColor
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
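# A small hedged sketch of inspecting a capture's properties (assumes
# OpenCV 3.x property names; under 2.4 they live in cv2.cv.CV_CAP_PROP_*):
# fps = cap.get(cv2.CAP_PROP_FPS)
# w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
# h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
# print('%.1f fps, %dx%d' % (fps, w, h))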
# In[ ]:
|
demon-ru/iml-crm
|
refs/heads/master
|
addons/website/__openerp__.py
|
27
|
{
'name': 'Website Builder',
'category': 'Website',
'summary': 'Build Your Enterprise Website',
'version': '1.0',
'description': """
OpenERP Website CMS
===================
""",
'author': 'OpenERP SA',
'depends': ['web', 'share', 'mail'],
'installable': True,
'data': [
'data/data.xml',
'security/ir.model.access.csv',
'security/ir_ui_view.xml',
'views/website_templates.xml',
'views/website_views.xml',
'views/snippets.xml',
'views/themes.xml',
'views/res_config.xml',
'views/ir_actions.xml',
'views/website_backend_navbar.xml',
],
'demo': [
'data/demo.xml',
],
'qweb': ['static/src/xml/website.backend.xml'],
'application': True,
}
|
jteehan/cfme_tests
|
refs/heads/master
|
utils/tests/test_soft_get.py
|
4
|
import pytest
from utils.soft_get import soft_get, MultipleResultsException
def test_soft_get():
class TestObj(object):
a = 1
b = 2
c = 3
aa = 11
bb = 22
bbb = 222
container_image = 'container_image'
image_registry = 'image_registry'
test_dict = {'a': 1, 'b': 2, 'c': 3, 'aa': 11, 'bb': 22,
'container_image': 'container_image',
'image_registry': 'image_registry'}
for tested in (TestObj, test_dict):
is_dict = (type(tested) is dict)
with pytest.raises(AttributeError):
soft_get(tested, 'no_such_attr', dict_=is_dict)
with pytest.raises(MultipleResultsException):
soft_get(tested, 'a', dict_=is_dict, best_match=False)
with pytest.raises(AttributeError):
soft_get(tested, 'Aa', dict_=is_dict, case_sensitive=True)
if not is_dict:
with pytest.raises(TypeError):
soft_get(tested, 'a', dict_=True)
assert soft_get(tested, 'a', dict_=is_dict) == 1
assert soft_get(tested, 'bb', dict_=is_dict) == 22
assert soft_get(tested, 'image', dict_=is_dict) == 'image_registry'
assert soft_get(tested, 'image', dict_=is_dict, dont_include=['registry']) \
== 'container_image'
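# The matching rules exercised above, condensed (illustrative only; test_dict
# is local to the test function):
# soft_get(test_dict, 'bb', dict_=True)     # -> 22
# soft_get(test_dict, 'image', dict_=True)  # -> 'image_registry' (best match)
# soft_get(test_dict, 'image', dict_=True,
#          dont_include=['registry'])       # -> 'container_image'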
|
vmanoria/bluemix-hue-filebrowser
|
refs/heads/master
|
hue-3.8.1-bluemix/desktop/core/ext-py/Django-1.6.10/django/conf/locale/gl/__init__.py
|
12133432
| |
da1z/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/moduleToNonPackage/after/src/nonp3/__init__.py
|
12133432
| |
ehashman/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/__init__.py
|
12133432
| |
mapr/hue
|
refs/heads/hue-3.9.0-mapr
|
desktop/core/ext-py/Django-1.6.10/tests/signals/__init__.py
|
12133432
| |
kustodian/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/junos/facts/legacy/__init__.py
|
12133432
| |
JetBrains/intellij-community
|
refs/heads/master
|
python/testData/inspections/RenameShadowingBuiltins.py
|
83
|
def f(name):
<weak_warning descr="Shadows built-in name 'file'">f<caret>ile</weak_warning> = open(name, 'rb')
return file.read()
|
Memrise/unidecode
|
refs/heads/master
|
unidecode/x025.py
|
252
|
data = (
'-', # 0x00
'-', # 0x01
'|', # 0x02
'|', # 0x03
'-', # 0x04
'-', # 0x05
'|', # 0x06
'|', # 0x07
'-', # 0x08
'-', # 0x09
'|', # 0x0a
'|', # 0x0b
'+', # 0x0c
'+', # 0x0d
'+', # 0x0e
'+', # 0x0f
'+', # 0x10
'+', # 0x11
'+', # 0x12
'+', # 0x13
'+', # 0x14
'+', # 0x15
'+', # 0x16
'+', # 0x17
'+', # 0x18
'+', # 0x19
'+', # 0x1a
'+', # 0x1b
'+', # 0x1c
'+', # 0x1d
'+', # 0x1e
'+', # 0x1f
'+', # 0x20
'+', # 0x21
'+', # 0x22
'+', # 0x23
'+', # 0x24
'+', # 0x25
'+', # 0x26
'+', # 0x27
'+', # 0x28
'+', # 0x29
'+', # 0x2a
'+', # 0x2b
'+', # 0x2c
'+', # 0x2d
'+', # 0x2e
'+', # 0x2f
'+', # 0x30
'+', # 0x31
'+', # 0x32
'+', # 0x33
'+', # 0x34
'+', # 0x35
'+', # 0x36
'+', # 0x37
'+', # 0x38
'+', # 0x39
'+', # 0x3a
'+', # 0x3b
'+', # 0x3c
'+', # 0x3d
'+', # 0x3e
'+', # 0x3f
'+', # 0x40
'+', # 0x41
'+', # 0x42
'+', # 0x43
'+', # 0x44
'+', # 0x45
'+', # 0x46
'+', # 0x47
'+', # 0x48
'+', # 0x49
'+', # 0x4a
'+', # 0x4b
'-', # 0x4c
'-', # 0x4d
'|', # 0x4e
'|', # 0x4f
'-', # 0x50
'|', # 0x51
'+', # 0x52
'+', # 0x53
'+', # 0x54
'+', # 0x55
'+', # 0x56
'+', # 0x57
'+', # 0x58
'+', # 0x59
'+', # 0x5a
'+', # 0x5b
'+', # 0x5c
'+', # 0x5d
'+', # 0x5e
'+', # 0x5f
'+', # 0x60
'+', # 0x61
'+', # 0x62
'+', # 0x63
'+', # 0x64
'+', # 0x65
'+', # 0x66
'+', # 0x67
'+', # 0x68
'+', # 0x69
'+', # 0x6a
'+', # 0x6b
'+', # 0x6c
'+', # 0x6d
'+', # 0x6e
'+', # 0x6f
'+', # 0x70
'/', # 0x71
'\\', # 0x72
'X', # 0x73
'-', # 0x74
'|', # 0x75
'-', # 0x76
'|', # 0x77
'-', # 0x78
'|', # 0x79
'-', # 0x7a
'|', # 0x7b
'-', # 0x7c
'|', # 0x7d
'-', # 0x7e
'|', # 0x7f
'#', # 0x80
'#', # 0x81
'#', # 0x82
'#', # 0x83
'#', # 0x84
'#', # 0x85
'#', # 0x86
'#', # 0x87
'#', # 0x88
'#', # 0x89
'#', # 0x8a
'#', # 0x8b
'#', # 0x8c
'#', # 0x8d
'#', # 0x8e
'#', # 0x8f
'#', # 0x90
'#', # 0x91
'#', # 0x92
'#', # 0x93
'-', # 0x94
'|', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'#', # 0xa0
'#', # 0xa1
'#', # 0xa2
'#', # 0xa3
'#', # 0xa4
'#', # 0xa5
'#', # 0xa6
'#', # 0xa7
'#', # 0xa8
'#', # 0xa9
'#', # 0xaa
'#', # 0xab
'#', # 0xac
'#', # 0xad
'#', # 0xae
'#', # 0xaf
'#', # 0xb0
'#', # 0xb1
'^', # 0xb2
'^', # 0xb3
'^', # 0xb4
'^', # 0xb5
'>', # 0xb6
'>', # 0xb7
'>', # 0xb8
'>', # 0xb9
'>', # 0xba
'>', # 0xbb
'V', # 0xbc
'V', # 0xbd
'V', # 0xbe
'V', # 0xbf
'<', # 0xc0
'<', # 0xc1
'<', # 0xc2
'<', # 0xc3
'<', # 0xc4
'<', # 0xc5
'*', # 0xc6
'*', # 0xc7
'*', # 0xc8
'*', # 0xc9
'*', # 0xca
'*', # 0xcb
'*', # 0xcc
'*', # 0xcd
'*', # 0xce
'*', # 0xcf
'*', # 0xd0
'*', # 0xd1
'*', # 0xd2
'*', # 0xd3
'*', # 0xd4
'*', # 0xd5
'*', # 0xd6
'*', # 0xd7
'*', # 0xd8
'*', # 0xd9
'*', # 0xda
'*', # 0xdb
'*', # 0xdc
'*', # 0xdd
'*', # 0xde
'*', # 0xdf
'*', # 0xe0
'*', # 0xe1
'*', # 0xe2
'*', # 0xe3
'*', # 0xe4
'*', # 0xe5
'*', # 0xe6
'#', # 0xe7
'#', # 0xe8
'#', # 0xe9
'#', # 0xea
'#', # 0xeb
'^', # 0xec
'^', # 0xed
'^', # 0xee
'O', # 0xef
'#', # 0xf0
'#', # 0xf1
'#', # 0xf2
'#', # 0xf3
'#', # 0xf4
'#', # 0xf5
'#', # 0xf6
'#', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
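# The tuple maps the low byte of code points U+2500-U+25FF (box drawing and
# block elements) to ASCII stand-ins, with '[?]' for unmapped slots, e.g.:
# data[0x71]  # U+2571 (light diagonal) -> '/'
# data[0xf8]  # unassigned              -> '[?]'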
|
AlienVault-Engineering/service-manager
|
refs/heads/master
|
src/unittest/resources/service_templates_test/pylib/{{cookiecutter.project_slug}}/src/main/python/noop.py
|
145
|
pass
|
Alexander-M-Waldman/local_currency_site
|
refs/heads/master
|
lib/python2.7/site-packages/django/views/static.py
|
300
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine().from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
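# A short sketch of the contract above (the header value is the RFC 1123
# example date; everything else hypothetical):
# was_modified_since(None)                         # -> True (no header, resend)
# was_modified_since('Sat, 29 Oct 1994 19:43:31 GMT; length=10',
#                    mtime=0, size=10)             # -> False (client copy fresh)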
|
greenpau/PyEwsClient
|
refs/heads/master
|
pyewsclient/__init__.py
|
1
|
# PyEwsClient - Microsoft Office 365 EWS (Exchange Web Services) Client Library
# Copyright (C) 2013 Paul Greenberg <paul@greenberg.pro>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ["ews_session", "ews_helper", "ews_email", "ews_attachment"]
from pyewsclient.ews_helper import EWSXmlSchemaValidator
from pyewsclient.ews_session import EWSSession
from pyewsclient.ews_email import EWSEmail
from pyewsclient.ews_attachment import EWSAttachment
|
virtUOS/courseware
|
refs/heads/master
|
tests/selenium_webdriver/selenium-webdriver-test-quick/videoblock.py
|
2
|
# -*- coding: iso-8859-15 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
import unittest, time, re
import mysuite
class VideoBlock(unittest.TestCase):
def setUp(self):
self.driver = mysuite.getOrCreateWebdriver()
self.driver.implicitly_wait(30)
self.base_url = "http://vm036.rz.uos.de/studip/mooc/"
self.verificationErrors = []
self.accept_next_alert = True
def test_video_block(self):
driver = self.driver
driver.find_element_by_css_selector("button.author").click()
driver.find_element_by_xpath("//button[@data-blocktype='VideoBlock']").click()
for i in range(60):
try:
if self.is_element_present(By.CSS_SELECTOR, "section.VideoBlock"): break
except: pass
time.sleep(1)
else: self.fail("time out")
#driver.find_element_by_css_selector("div.controls.editable > button.author").click()
for i in range(60):
try:
if self.is_element_present(By.CSS_SELECTOR, "p > input[type=\"text\"]"): break
except: pass
time.sleep(1)
else: self.fail("time out")
driver.find_element_by_css_selector("div.block-content > p> input[type=\"text\"]").clear()
driver.find_element_by_css_selector("div.block-content > p> input[type=\"text\"]").send_keys("https://video3.virtuos.uni-osnabrueck.de/static/engage-player/b89aa8f8-251c-49db-9ceb-fea6e79c86e6/987ba5be-d194-46b8-84da-b9721628586e/MOOC_Vornberger_5.mp4")
driver.find_element_by_name("save").click()
try: self.assertTrue(self.is_element_present(By.XPATH, "//iframe[@src='https://video3.virtuos.uni-osnabrueck.de/static/engage-player/b89aa8f8-251c-49db-9ceb-fea6e79c86e6/987ba5be-d194-46b8-84da-b9721628586e/MOOC_Vornberger_5.mp4']"))
except AssertionError as e: self.verificationErrors.append(str(e))
driver.find_element_by_css_selector("div.controls.editable > button.trash").click()
self.assertRegexpMatches(self.close_alert_and_get_its_text(), r"^Wollen Sie wirklich löschen[\s\S]$")
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
#self.driver.quit()
time.sleep(1)
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
pipermerriam/eth-testrpc
|
refs/heads/master
|
tests/client/test_get_block_number.py
|
4
|
def test_get_block_number(client):
assert client.get_block_number() == 0
client.wait_for_block(10)
assert client.get_block_number() == 10
|
dudymas/python-openstacksdk
|
refs/heads/master
|
openstack/network/v2/network.py
|
3
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack import resource
class Network(resource.Resource):
resource_key = 'network'
resources_key = 'networks'
base_path = '/networks'
service = network_service.NetworkService()
# capabilities
allow_create = True
allow_retrieve = True
allow_update = True
allow_delete = True
allow_list = True
# Properties
#: The administrative state of the network, which is up ``True`` or
#: down ``False``. *Type: bool*
admin_state_up = resource.prop('admin_state_up', type=bool)
#: The network name.
name = resource.prop('name')
#: The project this network is associated with.
project_id = resource.prop('tenant_id')
# TODO(briancurtin): These provider_ explanations are made up because
# they're undocumented.
#: Type of network, such as vlan.
provider_network_type = resource.prop('provider:network_type')
#: ID of the physical network.
provider_physical_network = resource.prop('provider:physical_network')
#: Segmentation ID.
provider_segmentation_id = resource.prop('provider:segmentation_id')
#: Whether or not the router is external. *Type: bool*
router_external = resource.prop('router:external')
#: Whether or not the router is 'External' or 'Internal'.
router_type = resource.prop('router_type')
segments = resource.prop('segments')
#: Indicates whether this network is shared across all tenants.
#: By default, only administrative users can change this value.
#: *Type: bool*
shared = resource.prop('shared', type=bool)
#: The network status.
status = resource.prop('status')
#: The associated subnets.
subnets = resource.prop('subnets')
def is_external(self):
if self.router_external is not None:
return bool(self.router_external)
if self.router_type == 'External':
return True
return False
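# A hedged usage sketch (assumes this SDK version's Resource.new() helper;
# values hypothetical): the wire-level 'router:external' field surfaces as
# router_external and drives is_external().
# net = Network.new(**{'router:external': True})
# net.is_external()  # -> True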
|
Salmista-94/Ninja_3.0_PyQt5
|
refs/heads/master
|
ninja_ide/gui/main_panel/browser_widget.py
|
1
|
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
import time
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QUrl
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWebKitWidgets import QWebView
from ninja_ide.core.file_handling import file_manager
class BrowserWidget(QWidget):
###############################################################################
# RecentProjectItem SIGNALS
###############################################################################
"""
openProject(QString)
openPreferences()
dontOpenStartPage()
"""
openProject = pyqtSignal(str)
openPreferences = pyqtSignal()
dontOpenStartPage = pyqtSignal()
###############################################################################
def __init__(self, url, process=None, parent=None):
super(BrowserWidget, self).__init__(parent)
self._process = process
vbox = QVBoxLayout(self)
#Web Frame
self.webFrame = QWebView(self)
self.webFrame.setAcceptDrops(False)
self.webFrame.load(QUrl(url))
vbox.addWidget(self.webFrame)
if process is not None:
time.sleep(0.5)
self.webFrame.load(QUrl(url))
self.webFrame.page().currentFrame().setScrollBarPolicy(
Qt.Vertical, Qt.ScrollBarAsNeeded)
self.webFrame.page().currentFrame().setScrollBarPolicy(
Qt.Horizontal, Qt.ScrollBarAsNeeded)
def start_page_operations(self, url):
opt = file_manager.get_basename(url.toString())
#self.emit(SIGNAL(opt))
getattr(self, url.toString()).emit()
def shutdown_pydoc(self):
if self._process is not None:
self._process.kill()
    def find_match(self, word, back=False, sensitive=False, whole=False):
        # NOTE: back/sensitive/whole are accepted for interface compatibility
        # but are not yet forwarded; findText() runs with its default flags.
        self.webFrame.page().findText(word)
|
AccelAI/accel.ai
|
refs/heads/master
|
flask-aws/lib/python2.7/site-packages/ebcli/objects/requests.py
|
2
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..lib import ec2, utils
from ..resources.strings import strings
from ..resources.statics import namespaces, option_names
class CreateEnvironmentRequest(object):
def __init__(self, app_name=None, env_name=None, cname=None, platform=None,
tier=None, instance_type=None, version_label=None,
instance_profile=None, service_role=None,
single_instance=False, key_name=None,
sample_application=False, tags=None, scale=None,
database=None, vpc=None, template_name=None, group_name=None,
elb_type=None):
self.app_name = app_name
self.cname = cname
self.env_name = env_name
self.instance_profile = instance_profile
self.instance_type = instance_type
self.key_name = key_name
self.platform = platform
self.sample_application = sample_application
self.service_role = service_role
self.single_instance = single_instance
self.template_name = template_name
self.tier = tier
self.version_label = version_label
self.group_name = group_name
if tags is None:
self.tags = []
else:
self.tags = list(tags)
if database is None:
self.database = {}
else:
self.database = dict(database)
if vpc is None:
self.vpc = {}
else:
self.vpc = dict(vpc)
self.elb_type = elb_type
self.scale = None
self.option_settings = []
self.compiled = False
self.description = strings['env.description']
if not self.app_name:
raise TypeError(self.__class__.__name__ + ' requires key-word argument app_name')
if not self.env_name:
raise TypeError(self.__class__.__name__ + ' requires key-word argument env_name')
if scale:
if not isinstance(scale, int):
raise TypeError('key-word argument scale must be of type int')
else:
self.scale = str(scale)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return self.__dict__ != other.__dict__
def add_option_setting(self, namespace, option_name, value, resource=None):
setting = {'Namespace': namespace,
'OptionName': option_name,
'Value': value}
if resource:
setting['ResourceName'] = resource
self.option_settings.append(setting)
def convert_to_kwargs(self):
self.compile_option_settings()
return self.get_standard_kwargs()
def compile_option_settings(self):
if not self.compiled:
self.add_client_defaults()
self.compile_database_options()
self.compile_vpc_options()
self.compile_common_options()
self.compiled = True
def get_standard_kwargs(self):
kwargs = {
'ApplicationName': self.app_name,
'EnvironmentName': self.env_name,
'OptionSettings': self.option_settings,
}
if self.platform:
kwargs['SolutionStackName'] = self.platform.name
if self.description:
kwargs['Description'] = self.description
if self.cname:
kwargs['CNAMEPrefix'] = self.cname
if self.template_name:
kwargs['TemplateName'] = self.template_name
if self.version_label:
kwargs['VersionLabel'] = self.version_label
if self.tags:
kwargs['Tags'] = self.tags
if self.tier:
kwargs['Tier'] = self.tier.to_struct()
if self.scale:
self.add_option_setting(
namespaces.AUTOSCALING,
option_names.MAX_SIZE,
self.scale)
self.add_option_setting(
namespaces.AUTOSCALING,
option_names.MIN_SIZE,
self.scale)
return kwargs
def compile_common_options(self):
if self.instance_profile:
self.add_option_setting(
namespaces.LAUNCH_CONFIGURATION,
option_names.IAM_INSTANCE_PROFILE,
self.instance_profile)
if self.service_role:
self.add_option_setting(
namespaces.ENVIRONMENT,
option_names.SERVICE_ROLE,
self.service_role
)
if self.instance_type:
self.add_option_setting(
namespaces.LAUNCH_CONFIGURATION,
option_names.INSTANCE_TYPE,
self.instance_type)
if self.single_instance:
self.add_option_setting(
namespaces.ENVIRONMENT,
option_names.ENVIRONMENT_TYPE,
'SingleInstance')
if self.key_name:
self.add_option_setting(
namespaces.LAUNCH_CONFIGURATION,
option_names.EC2_KEY_NAME,
self.key_name)
if self.scale:
self.add_option_setting(
namespaces.AUTOSCALING,
option_names.MAX_SIZE,
self.scale)
self.add_option_setting(
namespaces.AUTOSCALING,
option_names.MIN_SIZE,
self.scale)
if self.elb_type:
self.add_option_setting(
namespaces.ENVIRONMENT,
option_names.LOAD_BALANCER_TYPE,
self.elb_type)
def add_client_defaults(self):
if self.template_name:
            # don't add client defaults if a template is being used
return
if not self.instance_type:
if ec2.has_default_vpc():
# Launch with t2 micro if not a classic account
self.add_option_setting(
namespaces.LAUNCH_CONFIGURATION,
option_names.INSTANCE_TYPE,
't2.micro'
)
if self.platform.has_healthd_support():
self.add_option_setting(
namespaces.HEALTH_SYSTEM,
option_names.SYSTEM_TYPE,
'enhanced')
self.add_option_setting(
namespaces.COMMAND,
option_names.BATCH_SIZE,
'30')
self.add_option_setting(
namespaces.COMMAND,
option_names.BATCH_SIZE_TYPE,
'Percentage')
if not self.tier or self.tier.name.lower() == 'webserver':
self.add_option_setting(
namespaces.ELB_POLICIES,
option_names.CONNECTION_DRAINING,
'true')
self.add_option_setting(
namespaces.LOAD_BALANCER,
option_names.CROSS_ZONE,
'true')
if not self.single_instance:
self.add_option_setting(
namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_ENABLED,
'true')
self.add_option_setting(
namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_TYPE,
'Health')
def compile_database_options(self):
if not self.database:
return
namespace = namespaces.RDS
self.add_option_setting(namespace, option_names.DB_PASSWORD,
self.database['password'])
self.add_option_setting(namespace, option_names.DB_USER,
self.database['username'])
if self.database['instance']:
self.add_option_setting(namespace, option_names.DB_INSTANCE,
self.database['instance'])
if self.database['size']:
self.add_option_setting(namespace, option_names.DB_STORAGE_SIZE,
self.database['size'])
if self.database['engine']:
self.add_option_setting(namespace, option_names.DB_ENGINE,
self.database['engine'])
if self.database['version']:
self.add_option_setting(namespace, option_names.DB_ENGINE_VERSION,
self.database['version'])
self.add_option_setting(namespace, option_names.DB_DELETION_POLICY,
'Snapshot')
def compile_vpc_options(self):
if not self.vpc:
return
namespace = namespaces.VPC
self.add_option_setting(namespace, option_names.VPC_ID,
self.vpc['id'])
self.add_option_setting(namespace, option_names.PUBLIC_IP,
self.vpc['publicip'])
self.add_option_setting(namespace, option_names.ELB_SCHEME,
self.vpc['elbscheme'])
if self.vpc['elbsubnets']:
self.add_option_setting(namespace, option_names.ELB_SUBNETS,
self.vpc['elbsubnets'])
if self.vpc['ec2subnets']:
self.add_option_setting(namespace, option_names.SUBNETS,
self.vpc['ec2subnets'])
if self.vpc['securitygroups']:
self.add_option_setting(namespaces.LAUNCH_CONFIGURATION,
option_names.SECURITY_GROUPS,
self.vpc['securitygroups'])
if self.vpc['dbsubnets']:
self.add_option_setting(namespace, option_names.DB_SUBNETS,
self.vpc['dbsubnets'])
class CloneEnvironmentRequest(CreateEnvironmentRequest):
def __init__(self, app_name=None, env_name=None, original_name=None,
cname=None, platform=None, scale=None, tags=None):
        if not original_name:
            raise TypeError(self.__class__.__name__ + ' requires key-word argument original_name')
self.original_name = original_name
super(CloneEnvironmentRequest, self).__init__(
app_name=app_name, env_name=env_name, cname=cname,
platform=platform, scale=scale, tags=tags
)
self.description = strings['env.clonedescription']. \
replace('{env-name}', self.env_name)
def compile_option_settings(self):
if not self.compiled:
            # don't compile extras like vpc/database/etc.
self.compile_common_options()
self.compiled = True
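# A minimal usage sketch (names hypothetical; `stack` stands in for a
# SolutionStack-like object exposing .name and .has_healthd_support()):
# request = CreateEnvironmentRequest(app_name='my-app', env_name='my-env',
#                                    platform=stack, instance_type='t2.micro',
#                                    scale=2)
# kwargs = request.convert_to_kwargs()  # compiles option settings exactly once
# kwargs['ApplicationName']             # -> 'my-app'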
|
Osmose/snippets-service
|
refs/heads/master
|
snippets/base/migrations/0014_populate_countries.py
|
2
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from snippets.base import ENGLISH_COUNTRY_CHOICES
class Migration(DataMigration):
def forwards(self, orm):
for country_code, country_name in ENGLISH_COUNTRY_CHOICES:
orm['base.TargetedCountry'].objects.get_or_create(code=country_code)
def backwards(self, orm):
orm['base.TargetedCountry'].objects.all().delete()
models = {
u'base.clientmatchrule': {
'Meta': {'ordering': "('-modified',)", 'object_name': 'ClientMatchRule'},
'appbuildid': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'build_target': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'channel': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'distribution': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'distribution_version': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_exclusion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locale': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'os_version': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'startpage_version': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'})
},
u'base.jsonsnippet': {
'Meta': {'ordering': "('-modified',)", 'object_name': 'JSONSnippet'},
'client_match_rules': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.ClientMatchRule']", 'symmetrical': 'False', 'blank': 'True'}),
'country': ('snippets.base.fields.CountryField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'icon': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'on_aurora': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_beta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_nightly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_startpage_1': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'publish_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publish_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '100'})
},
u'base.jsonsnippetlocale': {
'Meta': {'object_name': 'JSONSnippetLocale'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('snippets.base.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '32'}),
'snippet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'locale_set'", 'to': u"orm['base.JSONSnippet']"})
},
u'base.searchprovider': {
'Meta': {'ordering': "('id',)", 'object_name': 'SearchProvider'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'base.snippet': {
'Meta': {'ordering': "('-modified',)", 'object_name': 'Snippet'},
'client_match_rules': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.ClientMatchRule']", 'symmetrical': 'False', 'blank': 'True'}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.TargetedCountry']", 'symmetrical': 'False', 'blank': 'True'}),
'country': ('snippets.base.fields.CountryField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'exclude_from_search_providers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.SearchProvider']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'on_aurora': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_beta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_nightly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_startpage_1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_startpage_2': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_startpage_3': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_startpage_4': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'publish_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publish_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.SnippetTemplate']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '100'})
},
u'base.snippetlocale': {
'Meta': {'object_name': 'SnippetLocale'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('snippets.base.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '32'}),
'snippet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'locale_set'", 'to': u"orm['base.Snippet']"})
},
u'base.snippettemplate': {
'Meta': {'object_name': 'SnippetTemplate'},
'code': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'base.snippettemplatevariable': {
'Meta': {'object_name': 'SnippetTemplateVariable'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variable_set'", 'to': u"orm['base.SnippetTemplate']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'base.targetedcountry': {
'Meta': {'object_name': 'TargetedCountry'},
'code': ('snippets.base.fields.CountryField', [], {'default': "u'us'", 'max_length': '16'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'base.uploadedfile': {
'Meta': {'object_name': 'UploadedFile'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['base']
symmetrical = True
|
TechWritingWhiz/indy-node
|
refs/heads/master
|
indy_common/test/auth/test_auth_nym.py
|
1
|
from plenum.common.constants import TRUSTEE, STEWARD, VERKEY
from indy_common.auth import Authoriser
from indy_common.constants import ROLE, NYM, TGB, TRUST_ANCHOR
def test_make_trustee(role, is_owner):
authorized = (role == TRUSTEE)
assert authorized == Authoriser.authorised(typ=NYM,
field=ROLE,
actorRole=role,
oldVal=None,
newVal=TRUSTEE,
isActorOwnerOfSubject=is_owner)[0]
def test_make_tgb(role, is_owner):
authorized = (role == TRUSTEE)
assert authorized == Authoriser.authorised(typ=NYM,
field=ROLE,
actorRole=role,
oldVal=None,
newVal=TGB,
isActorOwnerOfSubject=is_owner)[0]
def test_make_steward(role, is_owner):
authorized = (role == TRUSTEE)
assert authorized == Authoriser.authorised(typ=NYM,
field=ROLE,
actorRole=role,
oldVal=None,
newVal=STEWARD,
isActorOwnerOfSubject=is_owner)[0]
def test_make_trust_anchor(role, is_owner):
authorized = role in (TRUSTEE, STEWARD)
assert authorized == Authoriser.authorised(typ=NYM,
field=ROLE,
actorRole=role,
oldVal=None,
newVal=TRUST_ANCHOR,
isActorOwnerOfSubject=is_owner)[0]
def test_remove_trustee(role, is_owner):
authorized = (role == TRUSTEE)
assert authorized == Authoriser.authorised(typ=NYM,
field=ROLE,
actorRole=role,
oldVal=TRUSTEE,
newVal=None,
isActorOwnerOfSubject=is_owner)[0]
def test_remove_tgb(role, is_owner):
authorized = (role == TRUSTEE)
assert authorized == Authoriser.authorised(typ=NYM,
field=ROLE,
actorRole=role,
oldVal=TGB,
newVal=None,
isActorOwnerOfSubject=is_owner)[0]
def test_remove_steward(role, is_owner):
authorized = (role == TRUSTEE)
assert authorized == Authoriser.authorised(typ=NYM,
field=ROLE,
actorRole=role,
oldVal=STEWARD,
newVal=None,
isActorOwnerOfSubject=is_owner)[0]
def test_remove_trust_anchor(role, is_owner):
authorized = (role == TRUSTEE)
assert authorized == Authoriser.authorised(typ=NYM,
field=ROLE,
actorRole=role,
oldVal=TRUST_ANCHOR,
newVal=None,
isActorOwnerOfSubject=is_owner)[0]
def test_change_verkey(role, is_owner, old_values):
authorized = is_owner
assert authorized == Authoriser.authorised(typ=NYM,
field=VERKEY,
actorRole=role,
oldVal=old_values,
newVal="value2",
isActorOwnerOfSubject=is_owner)[0]
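# The rule these tests encode, called directly: a TRUSTEE may grant or revoke
# any role (and a STEWARD may additionally create trust anchors), regardless
# of ownership.  For example:
# Authoriser.authorised(typ=NYM, field=ROLE, actorRole=TRUSTEE,
#                       oldVal=None, newVal=STEWARD,
#                       isActorOwnerOfSubject=False)[0]  # -> True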
|
ivuk/wile
|
refs/heads/master
|
tests/test_wile.py
|
2
|
import os
import pytest
import click
import wile
def test_wile__get_or_gen_key(inside_tmpdir, logcapture, monkeypatch):
account_key_path = 'account.key'
account_key_size = 2048
monkeypatch.setattr(click, 'prompt', lambda *args, **kwargs: u'somepassword')
assert os.listdir(os.curdir) == []
key1 = wile.get_or_gen_key(None, account_key_path, account_key_size)
logcapture.check(
('wile', 'WARNING', 'no account key found; creating a new 2048 bit key in account.key')
)
logcapture.clear()
assert os.listdir(os.curdir) == [account_key_path]
key2 = wile.get_or_gen_key(None, account_key_path, account_key_size)
logcapture.check()
assert key1 == key2
@pytest.mark.parametrize("args", [
[],
['--help'],
])
def test_wile__no_args_or_help(args, clirunner, acmeclientmock_factory):
acmeclientmock = acmeclientmock_factory()
result = clirunner.invoke(wile.wile, args=args)
assert result.output_bytes.startswith(b'Usage:')
assert result.exit_code == 0
assert os.listdir(os.path.expanduser('~')) == []
assert os.listdir(os.curdir) == [] # ensure it's a noop
assert not acmeclientmock.called
def test_wile__version(clirunner, acmeclientmock_factory):
acmeclientmock = acmeclientmock_factory()
result = clirunner.invoke(wile.wile, args=['--version'])
assert ('version %s' % wile._version) in str(result.output_bytes)
assert result.exit_code == 0
assert os.listdir(os.path.expanduser('~')) == []
assert os.listdir(os.curdir) == [] # ensure it's a noop
assert not acmeclientmock.called
|
illicitonion/givabit
|
refs/heads/master
|
lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_3/django/db/models/sql/compiler.py
|
52
|
from django.core.exceptions import FieldError
from django.db import connections
from django.db.backends.util import truncate_name
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_proxied_model, get_order_dir, \
select_related_descend, Query
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
if not self.query.tables:
self.query.join((None, self.query.model._meta.db_table, None, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
out_cols = self.get_columns(with_col_aliases)
ordering, ordering_group_by = self.get_ordering()
# This must come after 'select' and 'ordering' -- see docstring of
# get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
params = []
for val in self.query.extra_select.itervalues():
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append('DISTINCT')
result.append(', '.join(out_cols + self.query.ordering_aliases))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping()
if grouping:
if ordering:
# If the backend can't group by PK (i.e., any database
# other than MySQL), then any fields mentioned in the
# ordering clause needs to be in the group by clause.
if not self.connection.features.allows_group_by_pk:
for col, col_params in ordering_group_by:
if col not in grouping:
grouping.append(str(col))
gb_params.extend(col_params)
else:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
return ' '.join(result), tuple(params)
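    # Rough shape of a call (illustrative, names hypothetical): for a simple
    # unsliced queryset,
    #   qs.query.get_compiler(using='default').as_sql()
    # returns something like ('SELECT "app_tbl"."id", ... FROM "app_tbl"', ()).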
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()]
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and col not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(col.as_sql(qn, self.connection))
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
result.extend([
'%s%s' % (
aggregate.as_sql(qn, self.connection),
alias is not None
and ' AS %s' % qn(truncate_name(alias, max_name_length))
or ''
)
for alias, aggregate in self.query.aggregate_select.items()
])
for table, col in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, local_only=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
# Skip all proxy to the root proxied model
proxied_model = get_proxied_model(opts)
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
if local_only and model is not None:
continue
if start_alias:
try:
alias = seen[model]
except KeyError:
if model is proxied_model:
alias = start_alias
else:
link_field = opts.get_ancestor_link(model)
alias = self.query.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.query.included_inherited_models[model]
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = self.query.order_by or self.query.model._meta.ordering
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
for field in ordering:
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((field, []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra_select:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, col, order in self.find_ordering_name(field,
self.query.model._meta, default_order=asc):
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra_select[col])
self.query.ordering_aliases = ordering_aliases
return result, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
if not alias:
alias = self.query.get_initial_alias()
field, target, opts, joins, last, extra = self.query.setup_joins(pieces,
opts, alias, False)
alias = joins[-1]
col = target.column
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
self.query.promote_alias_chain(joins,
self.query.alias_map[joins[0]][JOIN_TYPE] == self.query.LOUTER)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j][TABLE_NAME] for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
if alias:
# We have to do the same "final join" optimisation as in
# add_filter, since the final column might not otherwise be part of
# the select set (so we can't order on it).
while 1:
join = self.query.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.query.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
return [(alias, col, order)]
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
        need to be included. Subclasses can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns and
ordering must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
result.append('%s %s%s ON (%s.%s = %s.%s)'
% (join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col)))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, []
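    # Illustrative note (not from the original source): for a single inner join,
    # the fragments returned above look roughly like
    #   ['"app_book"',
    #    'INNER JOIN "app_author" ON ("app_book"."author_id" = "app_author"."id")']
    # where the table names are hypothetical.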
def get_grouping(self):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
if (len(self.query.model._meta.fields) == len(self.query.select) and
self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.model._meta.db_table, self.query.model._meta.pk.column)
]
group_by = self.query.group_by or []
extra_selects = []
for extra_select, extra_params in self.query.extra_select.itervalues():
extra_selects.append(extra_select)
params.extend(extra_params)
cols = (group_by + self.query.select +
self.query.related_select_cols + extra_selects)
for col in cols:
if isinstance(col, (list, tuple)):
result.append('%s.%s' % (qn(col[0]), qn(col[1])))
elif hasattr(col, 'as_sql'):
result.append(col.as_sql(qn, self.connection))
else:
result.append('(%s)' % str(col))
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
self.query.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
if avoid_set is None:
avoid_set = set()
orig_dupe_set = dupe_set
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
if not select_related_descend(f, restricted, requested):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
promote = nullable or f.null
if model:
int_opts = opts
alias = root_alias
alias_chain = []
for int_model in opts.get_base_chain(model):
                    # Proxy models have elements in the base chain with no
                    # parents; assign the new options object and skip to the
                    # next base in that case.
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
alias_chain.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if self.query.alias_map[root_alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(alias_chain, True)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
exclusions=used.union(avoid), promote=promote)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(columns)
if self.query.alias_map[alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(aliases, True)
self.query.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable, dupe_set, avoid)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = model._meta.db_table
int_opts = opts
alias = root_alias
alias_chain = []
chain = opts.get_base_chain(f.rel.to)
if chain is not None:
for int_model in chain:
                        # Proxy models have elements in the base chain with no
                        # parents; assign the new options object and skip to
                        # the next base in that case.
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
                                avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
                                    ()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join(
(alias, int_opts.db_table, lhs_col, int_opts.pk.column),
exclusions=used, promote=True, reuse=used
)
alias_chain.append(alias)
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
exclusions=used.union(avoid),
promote=True
)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, local_only=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(model._meta.fields)
next = requested.get(f.related_query_name(), {})
new_nullable = f.null or None
self.fill_related_selections(model._meta, table, cur_depth+1,
used, next, restricted, new_nullable)
def deferred_to_columns(self):
"""
        Converts the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_fields isn't populated until
# execute_sql() has been called.
if self.query.select_fields:
fields = self.query.select_fields + self.query.related_select_fields
else:
fields = self.query.model._meta.fields
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.model._meta.db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def execute_sql(self, result_type=MULTI):
"""
        Runs the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return empty_iter()
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.query.ordering_aliases:
return cursor.fetchone()[:-len(self.query.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.query.ordering_aliases:
result = order_modified_iter(cursor, len(self.query.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
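    # Usage sketch (illustrative, not from the original source):
    #   compiler = queryset.query.get_compiler(using='default')
    #   for rows in compiler.execute_sql(MULTI):   # blocks of rows
    #       for row in rows:
    #           ...
    # With result_type=SINGLE a single row (or None) comes back instead.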
class SQLInsertCompiler(SQLCompiler):
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
params = self.query.params
if self.return_id and self.connection.features.can_return_id_from_insert:
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
result.append(r_fmt % col)
params = params + r_params
return ' '.join(result), params
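    # Illustrative note (not from the original source): for a hypothetical
    # table with columns "name" and "author_id" this produces roughly
    #   'INSERT INTO "app_book" ("name", "author_id") VALUES (%s, %s)'
    # together with the corresponding parameter list.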
def execute_sql(self, return_id=False):
self.return_id = return_id
cursor = super(SQLInsertCompiler, self).execute_sql(None)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
from django.db.models.base import Model
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor and cursor.rowcount or 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.model._meta.pk.name])
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
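    # Illustrative note (not from the original source): on a backend where
    # update_can_self_select is False (e.g. MySQL), a multi-table update is
    # effectively rewritten from filtering via joins into
    #   UPDATE ... WHERE pk IN (<ids pre-selected by the cloned sub-query>)
    # so the updating table is never selected from within its own UPDATE.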
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql = ('SELECT %s FROM (%s) subquery' % (
', '.join([
aggregate.as_sql(qn, self.connection)
for aggregate in self.query.aggregate_select.values()
]),
self.query.subquery)
)
params = self.query.sub_params
return (sql, params)
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
def empty_iter():
"""
Returns an iterator containing no results.
"""
yield iter([]).next()
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
|
joshka/SoundCloud2.Bundle
|
refs/heads/master
|
Contents/Libraries/Shared/simplejson/tests/test_pass1.py
|
147
|
from unittest import TestCase
import simplejson as json
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E66,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],"compact": [1,2,3,4,5,6,7],
        "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
        "quotes": "&#34; \u0022 %22 0x22 034 &#x22;",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066,
1e1,
0.1e1,
1e-1,
1e00,2e+00,2e-00
,"rosebud"]
'''
class TestPass1(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEqual(res, json.loads(out))
|
SebastianLloret/Clever-Bot
|
refs/heads/master
|
http/cookies.py
|
54
|
from __future__ import absolute_import
import sys
assert sys.version_info[0] < 3
from Cookie import *
from Cookie import Morsel # left out of __all__ on Py2.7!
|
edmorley/treeherder
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/httplib2-0.9.1/python2/httplib2/test/brokensocket/socket.py
|
314
|
from realsocket import gaierror, error, getaddrinfo, SOCK_STREAM
|
konstruktoid/ansible-upstream
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/deploy_helper.py
|
149
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: deploy_helper
version_added: "2.0"
author: "Ramon de la Fuente (@ramondelafuente)"
short_description: Manages some of the steps common in deploying projects.
description:
- The Deploy Helper manages some of the steps common in deploying software.
It creates a folder structure, manages a symlink for the current release
and cleans up old releases.
- "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
C(project_path), whatever you set in the path parameter,
C(current_path), the path to the symlink that points to the active release,
C(releases_path), the path to the folder to keep releases in,
C(shared_path), the path to the folder to keep shared resources in,
C(unfinished_filename), the file to check for to recognize unfinished builds,
C(previous_release), the release the 'current' symlink is pointing to,
C(previous_release_path), the full path to the 'current' symlink target,
C(new_release), either the 'release' parameter or a generated timestamp,
C(new_release_path), the path to the new release folder (not created by the module)."
options:
path:
required: True
aliases: ['dest']
description:
- the root path of the project. Alias I(dest).
Returned in the C(deploy_helper.project_path) fact.
state:
description:
- the state of the project.
C(query) will only gather facts,
C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
C(finalize) will remove the unfinished_filename file, create a symlink to the newly
deployed release and optionally clean old releases,
C(clean) will remove failed & old releases,
C(absent) will remove the project folder (synonymous to the M(file) module with C(state=absent))
choices: [ present, finalize, absent, clean, query ]
default: present
release:
description:
      - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (e.g. '20141119223359').
This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
You can use the generated fact C(release={{ deploy_helper.new_release }}).
releases_path:
description:
- the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
Returned in the C(deploy_helper.releases_path) fact.
default: releases
shared_path:
description:
- the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
If this is set to an empty string, no shared folder will be created.
Returned in the C(deploy_helper.shared_path) fact.
default: shared
current_path:
description:
- the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
Returned in the C(deploy_helper.current_path) fact.
default: current
unfinished_filename:
description:
- the name of the file that indicates a deploy has not finished. All folders in the releases_path that
contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
automatically deleted from the I(new_release_path) during C(state=finalize).
default: DEPLOY_UNFINISHED
clean:
description:
- Whether to run the clean procedure in case of C(state=finalize).
type: bool
default: 'yes'
keep_releases:
description:
- the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
will be deleted first, so only correct releases will count. The current version will not count.
default: 5
notes:
- Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
new naming strategy without problems.
- Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
be much of a problem.
'''
EXAMPLES = '''
# General explanation, starting with an example folder structure for a project:
# root:
# releases:
# - 20140415234508
# - 20140415235146
# - 20140416082818
#
# shared:
# - sessions
# - uploads
#
# current: releases/20140416082818
# The 'releases' folder holds all the available releases. A release is a complete build of the application being
# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
# git tags or commit hashes.
#
# During a deploy, a new folder should be created in the releases folder and any build steps required should be
# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
# with a link to this build.
#
# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
#
# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
# release is reduced to the time it takes to switch the link.
#
# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
# procedure to remove it during cleanup.
# Typical usage
- name: Initialize the deploy root and gather facts
deploy_helper:
path: /path/to/root
- name: Clone the project to the new release folder
git:
repo: git://foosball.example.org/path/to/repo.git
dest: '{{ deploy_helper.new_release_path }}'
version: v1.1.1
- name: Add an unfinished file, to allow cleanup on successful finalize
file:
path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
state: touch
- name: Perform some build steps, like running your dependency manager for example
composer:
command: install
working_dir: '{{ deploy_helper.new_release_path }}'
- name: Create some folders in the shared folder
file:
path: '{{ deploy_helper.shared_path }}/{{ item }}'
state: directory
with_items:
- sessions
- uploads
- name: Add symlinks from the new release to the shared folder
file:
path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
state: link
with_items:
- path: app/sessions
src: sessions
- path: web/uploads
src: uploads
- name: Finalize the deploy, removing the unfinished file and switching the symlink
deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Retrieving facts before running a deploy
- name: Run 'state=query' to gather facts without changing anything
deploy_helper:
path: /path/to/root
state: query
# Remember to set the 'release' parameter when you actually call 'state=present' later
- name: Initialize the deploy root
deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: present
# all paths can be absolute or relative (to the 'path' parameter)
- deploy_helper:
path: /path/to/root
releases_path: /var/www/project/releases
shared_path: /var/www/shared
current_path: /var/www/active
# Using your own naming strategy for releases (a version tag in this case):
- deploy_helper:
path: /path/to/root
release: v1.1.1
state: present
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Using a different unfinished_filename:
- deploy_helper:
path: /path/to/root
unfinished_filename: README.md
release: '{{ deploy_helper.new_release }}'
state: finalize
# Postponing the cleanup of older builds:
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
clean: False
- deploy_helper:
path: /path/to/root
state: clean
# Or running the cleanup ahead of the new deploy
- deploy_helper:
path: /path/to/root
state: clean
- deploy_helper:
path: /path/to/root
state: present
# Keeping more old releases:
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
keep_releases: 10
# Or, if you use 'clean=false' on finalize:
- deploy_helper:
path: /path/to/root
state: clean
keep_releases: 10
# Removing the entire project root folder
- deploy_helper:
path: /path/to/root
state: absent
# Debugging the facts returned by the module
- deploy_helper:
path: /path/to/root
- debug:
var: deploy_helper
'''
import os
import shutil
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class DeployHelper(object):
def __init__(self, module):
self.module = module
self.file_args = module.load_file_common_arguments(module.params)
self.clean = module.params['clean']
self.current_path = module.params['current_path']
self.keep_releases = module.params['keep_releases']
self.path = module.params['path']
self.release = module.params['release']
self.releases_path = module.params['releases_path']
self.shared_path = module.params['shared_path']
self.state = module.params['state']
self.unfinished_filename = module.params['unfinished_filename']
def gather_facts(self):
current_path = os.path.join(self.path, self.current_path)
releases_path = os.path.join(self.path, self.releases_path)
if self.shared_path:
shared_path = os.path.join(self.path, self.shared_path)
else:
shared_path = None
previous_release, previous_release_path = self._get_last_release(current_path)
if not self.release and (self.state == 'query' or self.state == 'present'):
self.release = time.strftime("%Y%m%d%H%M%S")
if self.release:
new_release_path = os.path.join(releases_path, self.release)
else:
new_release_path = None
return {
'project_path': self.path,
'current_path': current_path,
'releases_path': releases_path,
'shared_path': shared_path,
'previous_release': previous_release,
'previous_release_path': previous_release_path,
'new_release': self.release,
'new_release_path': new_release_path,
'unfinished_filename': self.unfinished_filename
}
def delete_path(self, path):
if not os.path.lexists(path):
return False
if not os.path.isdir(path):
self.module.fail_json(msg="%s exists but is not a directory" % path)
if not self.module.check_mode:
try:
shutil.rmtree(path, ignore_errors=False)
except Exception as e:
self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
return True
def create_path(self, path):
changed = False
if not os.path.lexists(path):
changed = True
if not self.module.check_mode:
os.makedirs(path)
elif not os.path.isdir(path):
self.module.fail_json(msg="%s exists but is not a directory" % path)
changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
return changed
def check_link(self, path):
if os.path.lexists(path):
if not os.path.islink(path):
self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
def create_link(self, source, link_name):
changed = False
if os.path.islink(link_name):
norm_link = os.path.normpath(os.path.realpath(link_name))
norm_source = os.path.normpath(os.path.realpath(source))
if norm_link == norm_source:
changed = False
else:
changed = True
if not self.module.check_mode:
if not os.path.lexists(source):
                    self.module.fail_json(msg="the symlink target %s doesn't exist" % source)
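                # Swap the link atomically: create a temporary symlink first and
                # rename() it over the existing one, so the "current" link never
                # disappears mid-switch.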
tmp_link_name = link_name + '.' + self.unfinished_filename
if os.path.islink(tmp_link_name):
os.unlink(tmp_link_name)
os.symlink(source, tmp_link_name)
os.rename(tmp_link_name, link_name)
else:
changed = True
if not self.module.check_mode:
os.symlink(source, link_name)
return changed
def remove_unfinished_file(self, new_release_path):
changed = False
unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
if os.path.lexists(unfinished_file_path):
changed = True
if not self.module.check_mode:
os.remove(unfinished_file_path)
return changed
def remove_unfinished_builds(self, releases_path):
changes = 0
for release in os.listdir(releases_path):
if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
if self.module.check_mode:
changes += 1
else:
changes += self.delete_path(os.path.join(releases_path, release))
return changes
def remove_unfinished_link(self, path):
changed = False
tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
if not self.module.check_mode and os.path.exists(tmp_link_name):
changed = True
os.remove(tmp_link_name)
return changed
def cleanup(self, releases_path, reserve_version):
changes = 0
if os.path.lexists(releases_path):
releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
try:
releases.remove(reserve_version)
except ValueError:
pass
if not self.module.check_mode:
releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
for release in releases[self.keep_releases:]:
changes += self.delete_path(os.path.join(releases_path, release))
elif len(releases) > self.keep_releases:
changes += (len(releases) - self.keep_releases)
return changes
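    # Illustrative note (not from the original module): with keep_releases=5,
    # eight release folders on disk and reserve_version pointing at one of
    # them, the reserved folder is skipped and the two oldest of the remaining
    # seven (by ctime) are deleted, so cleanup() returns 2.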
def _get_file_args(self, path):
file_args = self.file_args.copy()
file_args['path'] = path
return file_args
def _get_last_release(self, current_path):
previous_release = None
previous_release_path = None
if os.path.lexists(current_path):
previous_release_path = os.path.realpath(current_path)
previous_release = os.path.basename(previous_release_path)
return previous_release, previous_release_path
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(aliases=['dest'], required=True, type='path'),
release=dict(required=False, type='str', default=None),
releases_path=dict(required=False, type='str', default='releases'),
shared_path=dict(required=False, type='path', default='shared'),
current_path=dict(required=False, type='path', default='current'),
keep_releases=dict(required=False, type='int', default=5),
clean=dict(required=False, type='bool', default=True),
unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
),
add_file_common_args=True,
supports_check_mode=True
)
deploy_helper = DeployHelper(module)
facts = deploy_helper.gather_facts()
result = {
'state': deploy_helper.state
}
changes = 0
if deploy_helper.state == 'query':
result['ansible_facts'] = {'deploy_helper': facts}
elif deploy_helper.state == 'present':
deploy_helper.check_link(facts['current_path'])
changes += deploy_helper.create_path(facts['project_path'])
changes += deploy_helper.create_path(facts['releases_path'])
if deploy_helper.shared_path:
changes += deploy_helper.create_path(facts['shared_path'])
result['ansible_facts'] = {'deploy_helper': facts}
elif deploy_helper.state == 'finalize':
if not deploy_helper.release:
module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
if deploy_helper.keep_releases <= 0:
module.fail_json(msg="'keep_releases' should be at least 1")
changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
if deploy_helper.clean:
changes += deploy_helper.remove_unfinished_link(facts['project_path'])
changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
elif deploy_helper.state == 'clean':
changes += deploy_helper.remove_unfinished_link(facts['project_path'])
changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
elif deploy_helper.state == 'absent':
# destroy the facts
result['ansible_facts'] = {'deploy_helper': []}
changes += deploy_helper.delete_path(facts['project_path'])
if changes > 0:
result['changed'] = True
else:
result['changed'] = False
module.exit_json(**result)
if __name__ == '__main__':
main()
|
frappe/erpnext
|
refs/heads/develop
|
erpnext/controllers/taxes_and_totals.py
|
1
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import json
import frappe, erpnext
from frappe import _, scrub
from frappe.utils import cint, flt, round_based_on_smallest_currency_fraction
from erpnext.controllers.accounts_controller import validate_conversion_rate, \
validate_taxes_and_charges, validate_inclusive_tax
from erpnext.stock.get_item_details import _get_item_tax_template
from erpnext.accounts.doctype.pricing_rule.utils import get_applied_pricing_rules
from erpnext.accounts.doctype.journal_entry.journal_entry import get_exchange_rate
class calculate_taxes_and_totals(object):
def __init__(self, doc):
self.doc = doc
frappe.flags.round_off_applicable_accounts = []
get_round_off_applicable_accounts(self.doc.company, frappe.flags.round_off_applicable_accounts)
self.calculate()
def calculate(self):
if not len(self.doc.get("items")):
return
self.discount_amount_applied = False
self._calculate()
if self.doc.meta.get_field("discount_amount"):
self.set_discount_amount()
self.apply_discount_amount()
if self.doc.doctype in ["Sales Invoice", "Purchase Invoice"]:
self.calculate_total_advance()
if self.doc.meta.get_field("other_charges_calculation"):
self.set_item_wise_tax_breakup()
def _calculate(self):
self.validate_conversion_rate()
self.calculate_item_values()
self.validate_item_tax_template()
self.initialize_taxes()
self.determine_exclusive_rate()
self.calculate_net_total()
self.calculate_taxes()
self.manipulate_grand_total_for_inclusive_tax()
self.calculate_totals()
self._cleanup()
self.calculate_total_net_weight()
def validate_item_tax_template(self):
for item in self.doc.get('items'):
if item.item_code and item.get('item_tax_template'):
item_doc = frappe.get_cached_doc("Item", item.item_code)
args = {
'net_rate': item.net_rate or item.rate,
'tax_category': self.doc.get('tax_category'),
'posting_date': self.doc.get('posting_date'),
'bill_date': self.doc.get('bill_date'),
'transaction_date': self.doc.get('transaction_date'),
'company': self.doc.get('company')
}
item_group = item_doc.item_group
item_group_taxes = []
while item_group:
item_group_doc = frappe.get_cached_doc('Item Group', item_group)
item_group_taxes += item_group_doc.taxes or []
item_group = item_group_doc.parent_item_group
item_taxes = item_doc.taxes or []
if not item_group_taxes and (not item_taxes):
# No validation if no taxes in item or item group
continue
taxes = _get_item_tax_template(args, item_taxes + item_group_taxes, for_validate=True)
if taxes:
if item.item_tax_template not in taxes:
item.item_tax_template = taxes[0]
frappe.msgprint(_("Row {0}: Item Tax template updated as per validity and rate applied").format(
item.idx, frappe.bold(item.item_code)
))
def validate_conversion_rate(self):
# validate conversion rate
company_currency = erpnext.get_company_currency(self.doc.company)
if not self.doc.currency or self.doc.currency == company_currency:
self.doc.currency = company_currency
self.doc.conversion_rate = 1.0
else:
validate_conversion_rate(self.doc.currency, self.doc.conversion_rate,
self.doc.meta.get_label("conversion_rate"), self.doc.company)
self.doc.conversion_rate = flt(self.doc.conversion_rate)
def calculate_item_values(self):
if not self.discount_amount_applied:
for item in self.doc.get("items"):
self.doc.round_floats_in(item)
if item.discount_percentage == 100:
item.rate = 0.0
elif item.price_list_rate:
if not item.rate or (item.pricing_rules and item.discount_percentage > 0):
item.rate = flt(item.price_list_rate *
(1.0 - (item.discount_percentage / 100.0)), item.precision("rate"))
item.discount_amount = item.price_list_rate * (item.discount_percentage / 100.0)
elif item.discount_amount and item.pricing_rules:
item.rate = item.price_list_rate - item.discount_amount
if item.doctype in ['Quotation Item', 'Sales Order Item', 'Delivery Note Item', 'Sales Invoice Item', 'POS Invoice Item', 'Purchase Invoice Item', 'Purchase Order Item', 'Purchase Receipt Item']:
item.rate_with_margin, item.base_rate_with_margin = self.calculate_margin(item)
if flt(item.rate_with_margin) > 0:
item.rate = flt(item.rate_with_margin * (1.0 - (item.discount_percentage / 100.0)), item.precision("rate"))
if item.discount_amount and not item.discount_percentage:
item.rate = item.rate_with_margin - item.discount_amount
else:
item.discount_amount = item.rate_with_margin - item.rate
elif flt(item.price_list_rate) > 0:
item.discount_amount = item.price_list_rate - item.rate
elif flt(item.price_list_rate) > 0 and not item.discount_amount:
item.discount_amount = item.price_list_rate - item.rate
item.net_rate = item.rate
if not item.qty and self.doc.get("is_return"):
item.amount = flt(-1 * item.rate, item.precision("amount"))
else:
item.amount = flt(item.rate * item.qty, item.precision("amount"))
item.net_amount = item.amount
self._set_in_company_currency(item, ["price_list_rate", "rate", "net_rate", "amount", "net_amount"])
item.item_tax_amount = 0.0
def _set_in_company_currency(self, doc, fields):
"""set values in base currency"""
for f in fields:
val = flt(flt(doc.get(f), doc.precision(f)) * self.doc.conversion_rate, doc.precision("base_" + f))
doc.set("base_" + f, val)
def initialize_taxes(self):
for tax in self.doc.get("taxes"):
if not self.discount_amount_applied:
validate_taxes_and_charges(tax)
validate_inclusive_tax(tax, self.doc)
if not self.doc.get('is_consolidated'):
tax.item_wise_tax_detail = {}
tax_fields = ["total", "tax_amount_after_discount_amount",
"tax_amount_for_current_item", "grand_total_for_current_item",
"tax_fraction_for_current_item", "grand_total_fraction_for_current_item"]
if tax.charge_type != "Actual" and \
not (self.discount_amount_applied and self.doc.apply_discount_on=="Grand Total"):
tax_fields.append("tax_amount")
for fieldname in tax_fields:
tax.set(fieldname, 0.0)
self.doc.round_floats_in(tax)
def determine_exclusive_rate(self):
if not any((cint(tax.included_in_print_rate) for tax in self.doc.get("taxes"))):
return
for item in self.doc.get("items"):
item_tax_map = self._load_item_tax_rate(item.item_tax_rate)
cumulated_tax_fraction = 0
total_inclusive_tax_amount_per_qty = 0
for i, tax in enumerate(self.doc.get("taxes")):
tax.tax_fraction_for_current_item, inclusive_tax_amount_per_qty = self.get_current_tax_fraction(tax, item_tax_map)
if i==0:
tax.grand_total_fraction_for_current_item = 1 + tax.tax_fraction_for_current_item
else:
tax.grand_total_fraction_for_current_item = \
self.doc.get("taxes")[i-1].grand_total_fraction_for_current_item \
+ tax.tax_fraction_for_current_item
cumulated_tax_fraction += tax.tax_fraction_for_current_item
total_inclusive_tax_amount_per_qty += inclusive_tax_amount_per_qty * flt(item.qty)
if not self.discount_amount_applied and item.qty and (cumulated_tax_fraction or total_inclusive_tax_amount_per_qty):
amount = flt(item.amount) - total_inclusive_tax_amount_per_qty
item.net_amount = flt(amount / (1 + cumulated_tax_fraction))
item.net_rate = flt(item.net_amount / item.qty, item.precision("net_rate"))
item.discount_percentage = flt(item.discount_percentage,
item.precision("discount_percentage"))
self._set_in_company_currency(item, ["net_rate", "net_amount"])
def _load_item_tax_rate(self, item_tax_rate):
return json.loads(item_tax_rate) if item_tax_rate else {}
def get_current_tax_fraction(self, tax, item_tax_map):
"""
Get tax fraction for calculating tax exclusive amount
from tax inclusive amount
"""
current_tax_fraction = 0
inclusive_tax_amount_per_qty = 0
if cint(tax.included_in_print_rate):
tax_rate = self._get_tax_rate(tax, item_tax_map)
if tax.charge_type == "On Net Total":
current_tax_fraction = tax_rate / 100.0
elif tax.charge_type == "On Previous Row Amount":
current_tax_fraction = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].tax_fraction_for_current_item
elif tax.charge_type == "On Previous Row Total":
current_tax_fraction = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].grand_total_fraction_for_current_item
elif tax.charge_type == "On Item Quantity":
inclusive_tax_amount_per_qty = flt(tax_rate)
if getattr(tax, "add_deduct_tax", None) and tax.add_deduct_tax == "Deduct":
current_tax_fraction *= -1.0
inclusive_tax_amount_per_qty *= -1.0
return current_tax_fraction, inclusive_tax_amount_per_qty
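    # Worked example (illustrative, not from the original source): a single 10%
    # tax with charge_type "On Net Total" that is included in the print rate
    # yields current_tax_fraction = 0.10, so a tax-inclusive amount of 110 is
    # later reduced to a net amount of 110 / (1 + 0.10) = 100.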
def _get_tax_rate(self, tax, item_tax_map):
if tax.account_head in item_tax_map:
return flt(item_tax_map.get(tax.account_head), self.doc.precision("rate", tax))
else:
return tax.rate
def calculate_net_total(self):
self.doc.total_qty = self.doc.total = self.doc.base_total = self.doc.net_total = self.doc.base_net_total = 0.0
for item in self.doc.get("items"):
self.doc.total += item.amount
self.doc.total_qty += item.qty
self.doc.base_total += item.base_amount
self.doc.net_total += item.net_amount
self.doc.base_net_total += item.base_net_amount
self.doc.round_floats_in(self.doc, ["total", "base_total", "net_total", "base_net_total"])
def calculate_taxes(self):
self.doc.rounding_adjustment = 0
# maintain actual tax rate based on idx
actual_tax_dict = dict([[tax.idx, flt(tax.tax_amount, tax.precision("tax_amount"))]
for tax in self.doc.get("taxes") if tax.charge_type == "Actual"])
for n, item in enumerate(self.doc.get("items")):
item_tax_map = self._load_item_tax_rate(item.item_tax_rate)
for i, tax in enumerate(self.doc.get("taxes")):
# tax_amount represents the amount of tax for the current step
current_tax_amount = self.get_current_tax_amount(item, tax, item_tax_map)
# Adjust divisional loss to the last item
if tax.charge_type == "Actual":
actual_tax_dict[tax.idx] -= current_tax_amount
if n == len(self.doc.get("items")) - 1:
current_tax_amount += actual_tax_dict[tax.idx]
# accumulate tax amount into tax.tax_amount
if tax.charge_type != "Actual" and \
not (self.discount_amount_applied and self.doc.apply_discount_on=="Grand Total"):
tax.tax_amount += current_tax_amount
# store tax_amount for current item as it will be used for
# charge type = 'On Previous Row Amount'
tax.tax_amount_for_current_item = current_tax_amount
# set tax after discount
tax.tax_amount_after_discount_amount += current_tax_amount
current_tax_amount = self.get_tax_amount_if_for_valuation_or_deduction(current_tax_amount, tax)
# note: grand_total_for_current_item contains the contribution of
# item's amount, previously applied tax and the current tax on that item
if i==0:
tax.grand_total_for_current_item = flt(item.net_amount + current_tax_amount)
else:
tax.grand_total_for_current_item = \
flt(self.doc.get("taxes")[i-1].grand_total_for_current_item + current_tax_amount)
# set precision in the last item iteration
if n == len(self.doc.get("items")) - 1:
self.round_off_totals(tax)
self._set_in_company_currency(tax,
["tax_amount", "tax_amount_after_discount_amount"])
self.round_off_base_values(tax)
self.set_cumulative_total(i, tax)
self._set_in_company_currency(tax, ["total"])
# adjust Discount Amount loss in last tax iteration
if i == (len(self.doc.get("taxes")) - 1) and self.discount_amount_applied \
and self.doc.discount_amount and self.doc.apply_discount_on == "Grand Total":
self.doc.rounding_adjustment = flt(self.doc.grand_total
- flt(self.doc.discount_amount) - tax.total,
self.doc.precision("rounding_adjustment"))
def get_tax_amount_if_for_valuation_or_deduction(self, tax_amount, tax):
# if just for valuation, do not add the tax amount in total
# if tax/charges is for deduction, multiply by -1
if getattr(tax, "category", None):
tax_amount = 0.0 if (tax.category == "Valuation") else tax_amount
if self.doc.doctype in ["Purchase Order", "Purchase Invoice", "Purchase Receipt", "Supplier Quotation"]:
tax_amount *= -1.0 if (tax.add_deduct_tax == "Deduct") else 1.0
return tax_amount
def set_cumulative_total(self, row_idx, tax):
tax_amount = tax.tax_amount_after_discount_amount
tax_amount = self.get_tax_amount_if_for_valuation_or_deduction(tax_amount, tax)
if row_idx == 0:
tax.total = flt(self.doc.net_total + tax_amount, tax.precision("total"))
else:
tax.total = flt(self.doc.get("taxes")[row_idx-1].total + tax_amount, tax.precision("total"))
def get_current_tax_amount(self, item, tax, item_tax_map):
tax_rate = self._get_tax_rate(tax, item_tax_map)
current_tax_amount = 0.0
if tax.charge_type == "Actual":
# distribute the tax amount proportionally to each item row
actual = flt(tax.tax_amount, tax.precision("tax_amount"))
current_tax_amount = item.net_amount*actual / self.doc.net_total if self.doc.net_total else 0.0
elif tax.charge_type == "On Net Total":
current_tax_amount = (tax_rate / 100.0) * item.net_amount
elif tax.charge_type == "On Previous Row Amount":
current_tax_amount = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].tax_amount_for_current_item
elif tax.charge_type == "On Previous Row Total":
current_tax_amount = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].grand_total_for_current_item
elif tax.charge_type == "On Item Quantity":
current_tax_amount = tax_rate * item.qty
if not self.doc.get("is_consolidated"):
self.set_item_wise_tax(item, tax, tax_rate, current_tax_amount)
return current_tax_amount
def set_item_wise_tax(self, item, tax, tax_rate, current_tax_amount):
# store tax breakup for each item
key = item.item_code or item.item_name
item_wise_tax_amount = current_tax_amount*self.doc.conversion_rate
if tax.item_wise_tax_detail.get(key):
item_wise_tax_amount += tax.item_wise_tax_detail[key][1]
tax.item_wise_tax_detail[key] = [tax_rate,flt(item_wise_tax_amount)]
def round_off_totals(self, tax):
if tax.account_head in frappe.flags.round_off_applicable_accounts:
tax.tax_amount = round(tax.tax_amount, 0)
tax.tax_amount_after_discount_amount = round(tax.tax_amount_after_discount_amount, 0)
tax.tax_amount = flt(tax.tax_amount, tax.precision("tax_amount"))
tax.tax_amount_after_discount_amount = flt(tax.tax_amount_after_discount_amount,
tax.precision("tax_amount"))
def round_off_base_values(self, tax):
# Round off to nearest integer based on regional settings
if tax.account_head in frappe.flags.round_off_applicable_accounts:
tax.base_tax_amount = round(tax.base_tax_amount, 0)
tax.base_tax_amount_after_discount_amount = round(tax.base_tax_amount_after_discount_amount, 0)
def manipulate_grand_total_for_inclusive_tax(self):
        # If taxes are included in the print rate, absorb any small rounding
        # difference between the computed grand total and the expected total.
if self.doc.get("taxes") and any(cint(t.included_in_print_rate) for t in self.doc.get("taxes")):
last_tax = self.doc.get("taxes")[-1]
non_inclusive_tax_amount = sum(flt(d.tax_amount_after_discount_amount)
for d in self.doc.get("taxes") if not d.included_in_print_rate)
diff = self.doc.total + non_inclusive_tax_amount \
- flt(last_tax.total, last_tax.precision("total"))
# If discount amount applied, deduct the discount amount
# because self.doc.total is always without discount, but last_tax.total is after discount
if self.discount_amount_applied and self.doc.discount_amount:
diff -= flt(self.doc.discount_amount)
diff = flt(diff, self.doc.precision("rounding_adjustment"))
if diff and abs(diff) <= (5.0 / 10**last_tax.precision("tax_amount")):
self.doc.rounding_adjustment = diff
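    # Worked example (illustrative): with doc.total = 100.00 and a fully
    # inclusive tax whose last row total works out to 99.99 after per-item
    # rounding, diff = 0.01; that is within the 5 / 10**precision window and is
    # stored as rounding_adjustment instead of being silently lost.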
def calculate_totals(self):
self.doc.grand_total = flt(self.doc.get("taxes")[-1].total) + flt(self.doc.rounding_adjustment) \
if self.doc.get("taxes") else flt(self.doc.net_total)
self.doc.total_taxes_and_charges = flt(self.doc.grand_total - self.doc.net_total
- flt(self.doc.rounding_adjustment), self.doc.precision("total_taxes_and_charges"))
self._set_in_company_currency(self.doc, ["total_taxes_and_charges", "rounding_adjustment"])
if self.doc.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice", "POS Invoice"]:
self.doc.base_grand_total = flt(self.doc.grand_total * self.doc.conversion_rate, self.doc.precision("base_grand_total")) \
if self.doc.total_taxes_and_charges else self.doc.base_net_total
else:
self.doc.taxes_and_charges_added = self.doc.taxes_and_charges_deducted = 0.0
for tax in self.doc.get("taxes"):
if tax.category in ["Valuation and Total", "Total"]:
if tax.add_deduct_tax == "Add":
self.doc.taxes_and_charges_added += flt(tax.tax_amount_after_discount_amount)
else:
self.doc.taxes_and_charges_deducted += flt(tax.tax_amount_after_discount_amount)
self.doc.round_floats_in(self.doc, ["taxes_and_charges_added", "taxes_and_charges_deducted"])
self.doc.base_grand_total = flt(self.doc.grand_total * self.doc.conversion_rate) \
if (self.doc.taxes_and_charges_added or self.doc.taxes_and_charges_deducted) \
else self.doc.base_net_total
self._set_in_company_currency(self.doc,
["taxes_and_charges_added", "taxes_and_charges_deducted"])
self.doc.round_floats_in(self.doc, ["grand_total", "base_grand_total"])
self.set_rounded_total()
def calculate_total_net_weight(self):
if self.doc.meta.get_field('total_net_weight'):
self.doc.total_net_weight = 0.0
for d in self.doc.items:
if d.total_weight:
self.doc.total_net_weight += d.total_weight
def set_rounded_total(self):
if self.doc.meta.get_field("rounded_total"):
if self.doc.is_rounded_total_disabled():
self.doc.rounded_total = self.doc.base_rounded_total = 0
return
self.doc.rounded_total = round_based_on_smallest_currency_fraction(self.doc.grand_total,
self.doc.currency, self.doc.precision("rounded_total"))
            # If taxes are included in the print rate, we have already
            # calculated the rounding adjustment above.
self.doc.rounding_adjustment += flt(self.doc.rounded_total - self.doc.grand_total,
self.doc.precision("rounding_adjustment"))
self._set_in_company_currency(self.doc, ["rounding_adjustment", "rounded_total"])
def _cleanup(self):
if not self.doc.get('is_consolidated'):
for tax in self.doc.get("taxes"):
tax.item_wise_tax_detail = json.dumps(tax.item_wise_tax_detail, separators=(',', ':'))
def set_discount_amount(self):
if self.doc.additional_discount_percentage:
self.doc.discount_amount = flt(flt(self.doc.get(scrub(self.doc.apply_discount_on)))
* self.doc.additional_discount_percentage / 100, self.doc.precision("discount_amount"))
def apply_discount_amount(self):
if self.doc.discount_amount:
if not self.doc.apply_discount_on:
frappe.throw(_("Please select Apply Discount On"))
self.doc.base_discount_amount = flt(self.doc.discount_amount * self.doc.conversion_rate,
self.doc.precision("base_discount_amount"))
total_for_discount_amount = self.get_total_for_discount_amount()
taxes = self.doc.get("taxes")
net_total = 0
if total_for_discount_amount:
# calculate item amount after Discount Amount
for i, item in enumerate(self.doc.get("items")):
distributed_amount = flt(self.doc.discount_amount) * \
item.net_amount / total_for_discount_amount
item.net_amount = flt(item.net_amount - distributed_amount, item.precision("net_amount"))
net_total += item.net_amount
# discount amount rounding loss adjustment if no taxes
if (self.doc.apply_discount_on == "Net Total" or not taxes or total_for_discount_amount==self.doc.net_total) \
and i == len(self.doc.get("items")) - 1:
discount_amount_loss = flt(self.doc.net_total - net_total - self.doc.discount_amount,
self.doc.precision("net_total"))
item.net_amount = flt(item.net_amount + discount_amount_loss,
item.precision("net_amount"))
item.net_rate = flt(item.net_amount / item.qty, item.precision("net_rate")) if item.qty else 0
self._set_in_company_currency(item, ["net_rate", "net_amount"])
self.discount_amount_applied = True
self._calculate()
else:
self.doc.base_discount_amount = 0
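    # Worked example (illustrative): a discount_amount of 10 on a net total of
    # 100 split across two items with net_amount 60 and 40 distributes 6 and 4,
    # leaving 54 and 36; any rounding loss is folded into the last item.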
def get_total_for_discount_amount(self):
if self.doc.apply_discount_on == "Net Total":
return self.doc.net_total
else:
actual_taxes_dict = {}
for tax in self.doc.get("taxes"):
if tax.charge_type in ["Actual", "On Item Quantity"]:
tax_amount = self.get_tax_amount_if_for_valuation_or_deduction(tax.tax_amount, tax)
actual_taxes_dict.setdefault(tax.idx, tax_amount)
elif tax.row_id in actual_taxes_dict:
actual_tax_amount = flt(actual_taxes_dict.get(tax.row_id, 0)) * flt(tax.rate) / 100
actual_taxes_dict.setdefault(tax.idx, actual_tax_amount)
return flt(self.doc.grand_total - sum(actual_taxes_dict.values()),
self.doc.precision("grand_total"))
def calculate_total_advance(self):
if self.doc.docstatus < 2:
total_allocated_amount = sum(flt(adv.allocated_amount, adv.precision("allocated_amount"))
for adv in self.doc.get("advances"))
self.doc.total_advance = flt(total_allocated_amount, self.doc.precision("total_advance"))
grand_total = self.doc.rounded_total or self.doc.grand_total
if self.doc.party_account_currency == self.doc.currency:
invoice_total = flt(grand_total - flt(self.doc.write_off_amount),
self.doc.precision("grand_total"))
else:
base_write_off_amount = flt(flt(self.doc.write_off_amount) * self.doc.conversion_rate,
self.doc.precision("base_write_off_amount"))
invoice_total = flt(grand_total * self.doc.conversion_rate,
self.doc.precision("grand_total")) - base_write_off_amount
if invoice_total > 0 and self.doc.total_advance > invoice_total:
frappe.throw(_("Advance amount cannot be greater than {0} {1}")
.format(self.doc.party_account_currency, invoice_total))
if self.doc.docstatus == 0:
self.calculate_outstanding_amount()
def is_internal_invoice(self):
"""
Checks if its an internal transfer invoice
and decides if to calculate any out standing amount or not
"""
if self.doc.doctype in ('Sales Invoice', 'Purchase Invoice') and self.doc.is_internal_transfer():
return True
return False
def calculate_outstanding_amount(self):
# NOTE:
# write_off_amount is only for POS Invoice
# total_advance is only for non POS Invoice
if self.doc.doctype == "Sales Invoice":
self.calculate_paid_amount()
if (self.doc.is_return and self.doc.return_against and not self.doc.get('is_pos')) or \
self.is_internal_invoice(): return
self.doc.round_floats_in(self.doc, ["grand_total", "total_advance", "write_off_amount"])
self._set_in_company_currency(self.doc, ['write_off_amount'])
if self.doc.doctype in ["Sales Invoice", "Purchase Invoice"]:
grand_total = self.doc.rounded_total or self.doc.grand_total
if self.doc.party_account_currency == self.doc.currency:
total_amount_to_pay = flt(grand_total - self.doc.total_advance
- flt(self.doc.write_off_amount), self.doc.precision("grand_total"))
else:
total_amount_to_pay = flt(flt(grand_total *
self.doc.conversion_rate, self.doc.precision("grand_total")) - self.doc.total_advance
- flt(self.doc.base_write_off_amount), self.doc.precision("grand_total"))
self.doc.round_floats_in(self.doc, ["paid_amount"])
change_amount = 0
if self.doc.doctype == "Sales Invoice" and not self.doc.get('is_return'):
self.calculate_write_off_amount()
self.calculate_change_amount()
change_amount = self.doc.change_amount \
if self.doc.party_account_currency == self.doc.currency else self.doc.base_change_amount
paid_amount = self.doc.paid_amount \
if self.doc.party_account_currency == self.doc.currency else self.doc.base_paid_amount
self.doc.outstanding_amount = flt(total_amount_to_pay - flt(paid_amount) + flt(change_amount),
self.doc.precision("outstanding_amount"))
if self.doc.doctype == 'Sales Invoice' and self.doc.get('is_pos') and self.doc.get('is_return'):
self.update_paid_amount_for_return(total_amount_to_pay)
def calculate_paid_amount(self):
paid_amount = base_paid_amount = 0.0
if self.doc.is_pos:
for payment in self.doc.get('payments'):
payment.amount = flt(payment.amount)
payment.base_amount = payment.amount * flt(self.doc.conversion_rate)
paid_amount += payment.amount
base_paid_amount += payment.base_amount
elif not self.doc.is_return:
self.doc.set('payments', [])
if self.doc.redeem_loyalty_points and self.doc.loyalty_amount:
base_paid_amount += self.doc.loyalty_amount
paid_amount += (self.doc.loyalty_amount / flt(self.doc.conversion_rate))
self.doc.paid_amount = flt(paid_amount, self.doc.precision("paid_amount"))
self.doc.base_paid_amount = flt(base_paid_amount, self.doc.precision("base_paid_amount"))
def calculate_change_amount(self):
self.doc.change_amount = 0.0
self.doc.base_change_amount = 0.0
if self.doc.doctype == "Sales Invoice" \
and self.doc.paid_amount > self.doc.grand_total and not self.doc.is_return \
and any(d.type == "Cash" for d in self.doc.payments):
grand_total = self.doc.rounded_total or self.doc.grand_total
base_grand_total = self.doc.base_rounded_total or self.doc.base_grand_total
self.doc.change_amount = flt(self.doc.paid_amount - grand_total +
self.doc.write_off_amount, self.doc.precision("change_amount"))
self.doc.base_change_amount = flt(self.doc.base_paid_amount - base_grand_total +
self.doc.base_write_off_amount, self.doc.precision("base_change_amount"))
def calculate_write_off_amount(self):
if flt(self.doc.change_amount) > 0:
self.doc.write_off_amount = flt(self.doc.grand_total - self.doc.paid_amount
+ self.doc.change_amount, self.doc.precision("write_off_amount"))
self.doc.base_write_off_amount = flt(self.doc.write_off_amount * self.doc.conversion_rate,
self.doc.precision("base_write_off_amount"))
def calculate_margin(self, item):
rate_with_margin = 0.0
base_rate_with_margin = 0.0
if item.price_list_rate:
if item.pricing_rules and not self.doc.ignore_pricing_rule:
has_margin = False
for d in get_applied_pricing_rules(item.pricing_rules):
pricing_rule = frappe.get_cached_doc('Pricing Rule', d)
if pricing_rule.margin_rate_or_amount and ((pricing_rule.currency == self.doc.currency and
pricing_rule.margin_type in ['Amount', 'Percentage']) or pricing_rule.margin_type == 'Percentage'):
item.margin_type = pricing_rule.margin_type
item.margin_rate_or_amount = pricing_rule.margin_rate_or_amount
has_margin = True
if not has_margin:
item.margin_type = None
item.margin_rate_or_amount = 0.0
if not item.pricing_rules and flt(item.rate) > flt(item.price_list_rate):
item.margin_type = "Amount"
item.margin_rate_or_amount = flt(item.rate - item.price_list_rate,
item.precision("margin_rate_or_amount"))
item.rate_with_margin = item.rate
elif item.margin_type and item.margin_rate_or_amount:
margin_value = item.margin_rate_or_amount if item.margin_type == 'Amount' else flt(item.price_list_rate) * flt(item.margin_rate_or_amount) / 100
rate_with_margin = flt(item.price_list_rate) + flt(margin_value)
base_rate_with_margin = flt(rate_with_margin) * flt(self.doc.conversion_rate)
return rate_with_margin, base_rate_with_margin
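# Worked example (illustrative): price_list_rate = 200 with margin_type
# 'Percentage' and margin_rate_or_amount = 10 gives margin_value = 20,
# rate_with_margin = 220 and, at conversion_rate 75, a base rate of 16500.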
def set_item_wise_tax_breakup(self):
self.doc.other_charges_calculation = get_itemised_tax_breakup_html(self.doc)
def update_paid_amount_for_return(self, total_amount_to_pay):
default_mode_of_payment = frappe.db.get_value('POS Payment Method',
{'parent': self.doc.pos_profile, 'default': 1}, ['mode_of_payment'], as_dict=1)
self.doc.payments = []
if default_mode_of_payment:
self.doc.append('payments', {
'mode_of_payment': default_mode_of_payment.mode_of_payment,
'amount': total_amount_to_pay,
'default': 1
})
else:
self.doc.is_pos = 0
self.doc.pos_profile = ''
self.calculate_paid_amount()
def get_itemised_tax_breakup_html(doc):
if not doc.taxes:
return
frappe.flags.company = doc.company
# get headers
tax_accounts = []
for tax in doc.taxes:
if getattr(tax, "category", None) and tax.category=="Valuation":
continue
if tax.description not in tax_accounts:
tax_accounts.append(tax.description)
headers = get_itemised_tax_breakup_header(doc.doctype + " Item", tax_accounts)
# get tax breakup data
itemised_tax, itemised_taxable_amount = get_itemised_tax_breakup_data(doc)
get_rounded_tax_amount(itemised_tax, doc.precision("tax_amount", "taxes"))
update_itemised_tax_data(doc)
frappe.flags.company = None
return frappe.render_template(
"templates/includes/itemised_tax_breakup.html", dict(
headers=headers,
itemised_tax=itemised_tax,
itemised_taxable_amount=itemised_taxable_amount,
tax_accounts=tax_accounts,
doc=doc
)
)
@frappe.whitelist()
def get_round_off_applicable_accounts(company, account_list):
account_list = get_regional_round_off_accounts(company, account_list)
return account_list
@erpnext.allow_regional
def get_regional_round_off_accounts(company, account_list):
pass
@erpnext.allow_regional
def update_itemised_tax_data(doc):
#Don't delete this method, used for localization
pass
@erpnext.allow_regional
def get_itemised_tax_breakup_header(item_doctype, tax_accounts):
return [_("Item"), _("Taxable Amount")] + tax_accounts
@erpnext.allow_regional
def get_itemised_tax_breakup_data(doc):
itemised_tax = get_itemised_tax(doc.taxes)
itemised_taxable_amount = get_itemised_taxable_amount(doc.items)
return itemised_tax, itemised_taxable_amount
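# Sketch (assumption: the standard `regional_overrides` hook is what backs
# `erpnext.allow_regional`); a country-specific app could then replace the
# function above from its hooks.py, e.g.:
#
# regional_overrides = {
#     'India': {
#         'erpnext.controllers.taxes_and_totals.get_itemised_tax_breakup_data':
#             'my_regional_app.overrides.get_itemised_tax_breakup_data',
#     },
# }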
def get_itemised_tax(taxes, with_tax_account=False):
itemised_tax = {}
for tax in taxes:
if getattr(tax, "category", None) and tax.category=="Valuation":
continue
item_tax_map = json.loads(tax.item_wise_tax_detail) if tax.item_wise_tax_detail else {}
if item_tax_map:
for item_code, tax_data in item_tax_map.items():
itemised_tax.setdefault(item_code, frappe._dict())
tax_rate = 0.0
tax_amount = 0.0
if isinstance(tax_data, list):
tax_rate = flt(tax_data[0])
tax_amount = flt(tax_data[1])
else:
tax_rate = flt(tax_data)
itemised_tax[item_code][tax.description] = frappe._dict(dict(
tax_rate = tax_rate,
tax_amount = tax_amount
))
if with_tax_account:
itemised_tax[item_code][tax.description].tax_account = tax.account_head
return itemised_tax
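# Illustrative payload (shape inferred from the parsing above): each tax
# row's item_wise_tax_detail is JSON mapping item_code -> [rate, amount],
# or a bare rate for legacy rows, e.g.
# '{"WIDGET-001": [5.0, 12.5], "WIDGET-002": 5.0}'
# yields tax_rate=5.0/tax_amount=12.5 for the first item and
# tax_rate=5.0/tax_amount=0.0 for the second.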
def get_itemised_taxable_amount(items):
itemised_taxable_amount = frappe._dict()
for item in items:
item_code = item.item_code or item.item_name
itemised_taxable_amount.setdefault(item_code, 0)
itemised_taxable_amount[item_code] += item.net_amount
return itemised_taxable_amount
def get_rounded_tax_amount(itemised_tax, precision):
# Rounding based on tax_amount precision
for taxes in itemised_tax.values():
for tax_account in taxes:
taxes[tax_account]["tax_amount"] = flt(taxes[tax_account]["tax_amount"], precision)
class init_landed_taxes_and_totals(object):
def __init__(self, doc):
self.doc = doc
self.tax_field = 'taxes' if self.doc.doctype == 'Landed Cost Voucher' else 'additional_costs'
self.set_account_currency()
self.set_exchange_rate()
self.set_amounts_in_company_currency()
def set_account_currency(self):
company_currency = erpnext.get_company_currency(self.doc.company)
for d in self.doc.get(self.tax_field):
if not d.account_currency:
account_currency = frappe.db.get_value('Account', d.expense_account, 'account_currency')
d.account_currency = account_currency or company_currency
def set_exchange_rate(self):
company_currency = erpnext.get_company_currency(self.doc.company)
for d in self.doc.get(self.tax_field):
if d.account_currency == company_currency:
d.exchange_rate = 1
elif not d.exchange_rate:
d.exchange_rate = get_exchange_rate(self.doc.posting_date, account=d.expense_account,
account_currency=d.account_currency, company=self.doc.company)
if not d.exchange_rate:
frappe.throw(_("Row {0}: Exchange Rate is mandatory").format(d.idx))
def set_amounts_in_company_currency(self):
for d in self.doc.get(self.tax_field):
d.amount = flt(d.amount, d.precision("amount"))
d.base_amount = flt(d.amount * flt(d.exchange_rate), d.precision("base_amount"))
|
SripriyaSeetharam/tacker
|
refs/heads/master
|
tacker/api/views/versions.py
|
29
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def get_view_builder(req):
base_url = req.application_url
return ViewBuilder(base_url)
class ViewBuilder(object):
def __init__(self, base_url):
"""Object initialization.
:param base_url: url of the root wsgi application
"""
self.base_url = base_url
def build(self, version_data):
"""Generic method used to generate a version entity."""
version = {
"id": version_data["id"],
"status": version_data["status"],
"links": self._build_links(version_data),
}
return version
def _build_links(self, version_data):
"""Generate a container of links that refer to the provided version."""
href = self.generate_href(version_data["id"])
links = [
{
"rel": "self",
"href": href,
},
]
return links
def generate_href(self, version_number):
"""Create an url that refers to a specific version_number."""
return os.path.join(self.base_url, version_number)
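# Minimal demo (hypothetical base URL; in practice get_view_builder(req)
# derives it from the incoming request):
if __name__ == '__main__':
    builder = ViewBuilder('http://controller:9890/')
    print(builder.build({'id': 'v1.0', 'status': 'CURRENT'}))
    # -> {'id': 'v1.0', 'status': 'CURRENT',
    #     'links': [{'rel': 'self', 'href': 'http://controller:9890/v1.0'}]}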
|
styskin/pybrain
|
refs/heads/master
|
examples/unsupervised/lsh.py
|
26
|
#!/usr/bin/env python
__author__ = 'Justin Bayer, bayer.justin@googlemail.com'
import logging
from random import shuffle
from pylab import show, plot, clf
from pybrain.supervised.knn.lsh.nearoptimal import MultiDimHash
from scipy import random, array, dot, zeros
from scipy.linalg import orth
def randomRotation(dim):
"""Return a random rotation matrix of rank dim."""
return orth(random.random((dim, dim)))
def makeData(amount = 10000):
"""Return 2D dataset of points in (0, 1) where points in a circle of
radius .4 around the center are blue and all the others are red."""
center = array([0.5, 0.5])
def makePoint():
"""Return a random point and its satellite information.
Satellite is 'blue' if point is in the circle, else 'red'."""
point = random.random((2,)) * 10
vectorLength = lambda x: dot(x.T, x)
return point, 'blue' if vectorLength(point - center) < 25 else 'red'
return [makePoint() for _ in range(amount)]
if __name__ == '__main__':
# Amount of dimensions to test with
dimensions = 3
loglevel = logging.DEBUG
logging.basicConfig(level=loglevel,
format='%(asctime)s %(levelname)s %(message)s')
logging.info("Making dataset...")
data = makeData(1000)
logging.info("Making random projection...")
proj = zeros((2, dimensions))
proj[0, 0] = 1
proj[1, 1] = 1
randRot = randomRotation(dimensions)
proj = dot(proj, randRot)
logging.info("Initializing data structure...")
m = MultiDimHash(dimensions, 2, 0.80)
logging.info("Putting data into hash...")
for point, satellite in data:
point = dot(point, proj)
m.insert(point, satellite)
logging.info("Retrieve nearest neighbours...")
result = []
width, height = 2**5, 2**5
grid = (array([i / width * 10, j / height * 10])
for i in range(width)
for j in range(height))
projected_grid = [(p, dot(p, proj)) for p in grid]
# Just to fake random access
shuffle(projected_grid)
for p, pp in projected_grid:
nns = m.knn(pp, 1)
if nns == []:
continue
_, color = nns[0]
result.append((p, color))
# Visualize it
visualize = True
if visualize:
clf()
result = [((x, y), color)
for (x, y), color in result
if color is not None]
xs_red = [x for ((x, y), color) in result if color == 'red']
ys_red = [y for ((x, y), color) in result if color == 'red']
xs_blue = [x for ((x, y), color) in result if color == 'blue']
ys_blue = [y for ((x, y), color) in result if color == 'blue']
plot(xs_red, ys_red, 'ro')
plot(xs_blue, ys_blue, 'bo')
show()
ballsizes = (len(ball) for ball in m.balls.values())
logging.info("Sizes of the balls: " + " ".join(str(i) for i in ballsizes))
logging.info("Finished")
|
supercheetah/diceroller
|
refs/heads/master
|
rollenum.py
|
1
|
from enum import Enum
# Use the stdlib Enum functional API: Enum(class_name, member_names).
# (The original positional form only worked with the legacy PyPI `enum`
# package and raises TypeError under the standard library.)
Ops = Enum('Ops', 'add sub mul div')
Fn = Enum('Fn', 'constant dice xdice op var_grouping const_grouping')
# Look members up by name instead of eval()
StrFn = lambda x: Fn[x]
StrOps = lambda x: Ops[x]
OpsRepr = {Ops.add: '+', Ops.sub: '-', Ops.mul: '*', Ops.div: '/'}
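# Minimal demo of the lookup helpers above (hypothetical usage):
if __name__ == '__main__':
    assert StrOps('add') is Ops.add
    assert StrFn('dice') is Fn.dice
    print(OpsRepr[StrOps('mul')])  # -> *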
|
WatanabeYasumasa/edx-platform
|
refs/heads/gacco2/master
|
i18n/tests/test_segment.py
|
24
|
"""Test i18n/segment.py"""
import os.path
import shutil
import unittest
from path import path
import polib
from i18n.segment import segment_pofile
HERE = path(__file__).dirname()
TEST_DATA = HERE / "data"
WORK = HERE / "work"
class SegmentTest(unittest.TestCase):
"""Test segment_pofile."""
def setUp(self):
if not os.path.exists(WORK):
os.mkdir(WORK)
self.addCleanup(shutil.rmtree, WORK)
def assert_pofile_same(self, pofile1, pofile2):
"""The paths `p1` and `p2` should be identical pofiles."""
po1 = polib.pofile(pofile1)
po2 = polib.pofile(pofile2)
self.assertEqual(po1, po2)
def test_sample_data(self):
work_file = WORK / "django.po"
shutil.copyfile(TEST_DATA / "django_before.po", work_file)
original_pofile = polib.pofile(work_file)
written = segment_pofile(
work_file,
{
'studio.po': [
'cms/*',
'other_cms/*',
],
}
)
self.assertEqual(written, set([WORK / "django.po", WORK / "studio.po"]))
pofiles = [polib.pofile(f) for f in written]
after_entries = sum(len(pofile) for pofile in pofiles)
self.assertEqual(len(original_pofile), after_entries)
original_ids = set(m.msgid for m in original_pofile)
after_ids = set(m.msgid for pofile in pofiles for m in pofile)
self.assertEqual(original_ids, after_ids)
self.assert_pofile_same(WORK / "django.po", TEST_DATA / "django_after.po")
self.assert_pofile_same(WORK / "studio.po", TEST_DATA / "studio.po")
|
wangjun/pyload
|
refs/heads/stable
|
module/plugins/crypter/LofCc.py
|
2
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadCrypter import DeadCrypter
class LofCc(DeadCrypter):
__name__ = "LofCc"
__type__ = "container"
__pattern__ = r"http://lof.cc/(.*)"
__version__ = "0.21"
__description__ = """lof.cc Plugin"""
__author_name__ = ("mkaay")
__author_mail__ = ("mkaay@mkaay.de")
|
jusdng/odoo
|
refs/heads/8.0
|
addons/mrp_byproduct/mrp_byproduct.py
|
150
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class mrp_subproduct(osv.osv):
_name = 'mrp.subproduct'
_description = 'Byproduct'
_columns={
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts is set on production orders using this BoM. \
'Fixed' means the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many units the production order creates. \
'Variable' means the quantity is computed as \
(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM) * quantity of manufactured product in the production order."),
'bom_id': fields.many2one('mrp.bom', 'BoM', ondelete='cascade'),
}
_defaults={
'subproduct_type': 'variable',
'product_qty': lambda *a: 1.0,
}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {'product_uom': prod.uom_id.id}
return {'value': v}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
class mrp_bom(osv.osv):
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit='mrp.bom'
_columns={
'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts', copy=True),
}
class mrp_production(osv.osv):
_description = 'Production'
_inherit= 'mrp.production'
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order and calculates quantity based on subproduct_type.
@return: Newly generated picking Id.
"""
move_obj = self.pool.get('stock.move')
picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context)
product_uom_obj = self.pool.get('product.uom')
for production in self.browse(cr, uid, ids):
source = production.product_id.property_stock_production.id
if not production.bom_id:
continue
for sub_product in production.bom_id.sub_products:
product_uom_factor = product_uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.bom_id.product_uom.id)
qty1 = sub_product.product_qty
if sub_product.subproduct_type == 'variable':
if production.product_qty:
qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0)
data = {
'name': 'PROD:'+production.name,
'date': production.date_planned,
'product_id': sub_product.product_id.id,
'product_uom_qty': qty1,
'product_uom': sub_product.product_uom.id,
'location_id': source,
'location_dest_id': production.location_dest_id.id,
'move_dest_id': production.move_prod_id.id,
'production_id': production.id
}
move_id = move_obj.create(cr, uid, data, context=context)
move_obj.action_confirm(cr, uid, [move_id], context=context)
return picking_id
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
"""Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but with
the module mrp_byproduct installed it can differ for byproducts having type 'variable'.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Identify the product to produce.
:return: The factor to apply to the quantity that we should produce for the given production order and stock move.
"""
sub_obj = self.pool.get('mrp.subproduct')
move_obj = self.pool.get('stock.move')
production_obj = self.pool.get('mrp.production')
production_browse = production_obj.browse(cr, uid, production_id, context=context)
move_browse = move_obj.browse(cr, uid, move_id, context=context)
subproduct_factor = 1
sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context)
if sub_id:
subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context)
if subproduct_record.bom_id.product_qty:
subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty
return subproduct_factor
return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context)
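# Worked example (illustrative): a BoM producing 10 units of the finished
# product that declares a 'variable' byproduct with product_qty = 2 gives a
# factor of 2 / 10 = 0.2, so an order for 50 finished units creates
# 50 * 0.2 = 10 byproduct units.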
class change_production_qty(osv.osv_memory):
_inherit = 'change.production.qty'
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
bom_obj = self.pool.get('mrp.bom')
move_lines_obj = self.pool.get('stock.move')
prod_obj = self.pool.get('mrp.production')
for m in prod.move_created_ids:
if m.product_id.id == prod.product_id.id:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
else:
for sub_product_line in prod.bom_id.sub_products:
if sub_product_line.product_id.id == m.product_id.id:
factor = prod_obj._get_subproduct_factor(cr, uid, prod.id, m.id, context=context)
subproduct_qty = sub_product_line.subproduct_type == 'variable' and qty * factor or sub_product_line.product_qty
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': subproduct_qty})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
alexryndin/ambari
|
refs/heads/branch-adh-1.5
|
ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/hook.py
|
4
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script.hook import Hook
from shared_initialization import link_configs
from shared_initialization import setup_config
from shared_initialization import setup_stack_symlinks
class AfterInstallHook(Hook):
def hook(self, env):
import params
env.set_params(params)
setup_stack_symlinks()
setup_config()
link_configs(self.stroutfile)
if __name__ == "__main__":
AfterInstallHook().execute()
|
NicoVarg99/daf-recipes
|
refs/heads/master
|
ckan/ckan/ckanext-harvest/ckanext/harvest/tests/test_action.py
|
1
|
import json
import factories
import unittest
from nose.tools import assert_equal, assert_raises
from nose.plugins.skip import SkipTest
try:
from ckan.tests import factories as ckan_factories
from ckan.tests.helpers import (_get_test_app, reset_db,
FunctionalTestBase, assert_in)
except ImportError:
from ckan.new_tests import factories as ckan_factories
from ckan.new_tests.helpers import (_get_test_app, reset_db,
FunctionalTestBase)
try:
from ckan.new_tests.helpers import assert_in
except ImportError:
# for ckan 2.2
try:
from nose.tools import assert_in
except ImportError:
# Python 2.6 doesn't have it
def assert_in(a, b, msg=None):
assert a in b, msg or '%r was not in %r' % (a, b)
from ckan import plugins as p
from ckan.plugins import toolkit
from ckan import model
from ckanext.harvest.interfaces import IHarvester
import ckanext.harvest.model as harvest_model
def call_action_api(action, apikey=None, status=200, **kwargs):
'''POST an HTTP request to the CKAN API and return the result.
Any additional keyword arguments that you pass to this function as **kwargs
are posted as params to the API.
Usage:
package_dict = call_action_api('package_create', apikey=apikey,
name='my_package')
assert package_dict['name'] == 'my_package'
num_followers = call_action_api('user_follower_count', id='annafan')
If you are expecting an error from the API and want to check the contents
of the error dict, you have to use the status param otherwise an exception
will be raised:
error_dict = call_action_api('group_activity_list', status=403,
id='invalid_id')
assert error_dict['message'] == 'Access Denied'
:param action: the action to post to, e.g. 'package_create'
:type action: string
:param apikey: the API key to put in the Authorization header of the post
(optional, default: None)
:type apikey: string
:param status: the HTTP status code expected in the response from the CKAN
API, e.g. 403, if a different status code is received an exception will
be raised (optional, default: 200)
:type status: int
:param **kwargs: any other keyword arguments passed to this function will
be posted to the API as params
:raises paste.fixture.AppError: if the HTTP status code of the response
from the CKAN API is different from the status param passed to this
function
:returns: the 'result' or 'error' dictionary from the CKAN API response
:rtype: dictionary
'''
params = json.dumps(kwargs)
app = _get_test_app()
response = app.post('/api/action/{0}'.format(action), params=params,
extra_environ={'Authorization': str(apikey)},
status=status)
if status in (200,):
assert response.json['success'] is True
return response.json['result']
else:
assert response.json['success'] is False
return response.json['error']
class MockHarvesterForActionTests(p.SingletonPlugin):
p.implements(IHarvester)
def info(self):
return {'name': 'test-for-action',
'title': 'Test for action',
'description': 'test'}
def validate_config(self, config):
if not config:
return config
try:
config_obj = json.loads(config)
if 'custom_option' in config_obj:
if not isinstance(config_obj['custom_option'], list):
raise ValueError('custom_option must be a list')
except ValueError as e:
raise e
return config
def gather_stage(self, harvest_job):
return []
def fetch_stage(self, harvest_object):
return True
def import_stage(self, harvest_object):
return True
SOURCE_DICT = {
"url": "http://test.action.com",
"name": "test-source-action",
"title": "Test source action",
"notes": "Test source action desc",
"source_type": "test-for-action",
"frequency": "MANUAL",
"config": json.dumps({"custom_option": ["a", "b"]})
}
class ActionBase(object):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('test_action_harvester'):
p.load('test_action_harvester')
def setup(self):
reset_db()
harvest_model.setup()
@classmethod
def teardown_class(cls):
p.unload('test_action_harvester')
class HarvestSourceActionBase(FunctionalTestBase):
@classmethod
def setup_class(cls):
super(HarvestSourceActionBase, cls).setup_class()
harvest_model.setup()
if not p.plugin_loaded('test_action_harvester'):
p.load('test_action_harvester')
@classmethod
def teardown_class(cls):
super(HarvestSourceActionBase, cls).teardown_class()
p.unload('test_action_harvester')
def _get_source_dict(self):
return {
"url": "http://test.action.com",
"name": "test-source-action",
"title": "Test source action",
"notes": "Test source action desc",
"source_type": "test-for-action",
"frequency": "MANUAL",
"config": json.dumps({"custom_option": ["a", "b"]})
}
def test_invalid_missing_values(self):
source_dict = {}
test_data = self._get_source_dict()
if 'id' in test_data:
source_dict['id'] = test_data['id']
sysadmin = ckan_factories.Sysadmin()
result = call_action_api(self.action,
apikey=sysadmin['apikey'], status=409,
**source_dict)
for key in ('name', 'title', 'url', 'source_type'):
assert_equal(result[key], [u'Missing value'])
def test_invalid_unknown_type(self):
source_dict = self._get_source_dict()
source_dict['source_type'] = 'unknown'
sysadmin = ckan_factories.Sysadmin()
result = call_action_api(self.action,
apikey=sysadmin['apikey'], status=409,
**source_dict)
assert 'source_type' in result
assert u'Unknown harvester type' in result['source_type'][0]
def test_invalid_unknown_frequency(self):
wrong_frequency = 'ANNUALLY'
source_dict = self._get_source_dict()
source_dict['frequency'] = wrong_frequency
sysadmin = ckan_factories.Sysadmin()
result = call_action_api(self.action,
apikey=sysadmin['apikey'], status=409,
**source_dict)
assert 'frequency' in result
assert u'Frequency {0} not recognised'.format(wrong_frequency) in result['frequency'][0]
def test_invalid_wrong_configuration(self):
source_dict = self._get_source_dict()
source_dict['config'] = 'not_json'
sysadmin = ckan_factories.Sysadmin()
result = call_action_api(self.action,
apikey=sysadmin['apikey'], status=409,
**source_dict)
assert 'config' in result
assert u'Error parsing the configuration options: No JSON object could be decoded' in result['config'][0]
source_dict['config'] = json.dumps({'custom_option': 'not_a_list'})
result = call_action_api(self.action,
apikey=sysadmin['apikey'], status=409,
**source_dict)
assert 'config' in result
assert u'Error parsing the configuration options: custom_option must be a list' in result['config'][0]
class TestHarvestSourceActionCreate(HarvestSourceActionBase):
def __init__(self):
self.action = 'harvest_source_create'
def test_create(self):
source_dict = self._get_source_dict()
sysadmin = ckan_factories.Sysadmin()
result = call_action_api('harvest_source_create',
apikey=sysadmin['apikey'], **source_dict)
for key in source_dict.keys():
assert_equal(source_dict[key], result[key])
# Check that source was actually created
source = harvest_model.HarvestSource.get(result['id'])
assert_equal(source.url, source_dict['url'])
assert_equal(source.type, source_dict['source_type'])
# Trying to create a source with the same URL fails
source_dict = self._get_source_dict()
source_dict['name'] = 'test-source-action-new'
result = call_action_api('harvest_source_create',
apikey=sysadmin['apikey'], status=409,
**source_dict)
assert 'url' in result
assert u'There already is a Harvest Source for this URL' in result['url'][0]
class HarvestSourceFixtureMixin(object):
def _get_source_dict(self):
'''Not only returns a source_dict, but creates the HarvestSource object
as well - suitable for testing update actions.
'''
source = HarvestSourceActionBase._get_source_dict(self)
source = factories.HarvestSource(**source)
# delete status because it gets in the way of the status supplied to
# call_action_api later on. It is only a generated value, not affecting
# the update/patch anyway.
del source['status']
return source
class TestHarvestSourceActionUpdate(HarvestSourceFixtureMixin,
HarvestSourceActionBase):
def __init__(self):
self.action = 'harvest_source_update'
def test_update(self):
source_dict = self._get_source_dict()
source_dict.update({
"url": "http://test.action.updated.com",
"name": "test-source-action-updated",
"title": "Test source action updated",
"notes": "Test source action desc updated",
"source_type": "test",
"frequency": "MONTHLY",
"config": json.dumps({"custom_option": ["c", "d"]})
})
sysadmin = ckan_factories.Sysadmin()
result = call_action_api('harvest_source_update',
apikey=sysadmin['apikey'], **source_dict)
for key in set(('url', 'name', 'title', 'notes', 'source_type',
'frequency', 'config')):
assert_equal(source_dict[key], result[key], "Key: %s" % key)
# Check that source was actually updated
source = harvest_model.HarvestSource.get(result['id'])
assert_equal(source.url, source_dict['url'])
assert_equal(source.type, source_dict['source_type'])
class TestHarvestSourceActionPatch(HarvestSourceFixtureMixin,
HarvestSourceActionBase):
def __init__(self):
self.action = 'harvest_source_patch'
if toolkit.check_ckan_version(max_version='2.2.99'):
# harvest_source_patch only came in with ckan 2.3
raise SkipTest()
def test_invalid_missing_values(self):
pass
def test_patch(self):
source_dict = self._get_source_dict()
patch_dict = {
"id": source_dict['id'],
"name": "test-source-action-patched",
"url": "http://test.action.patched.com",
"config": json.dumps({"custom_option": ["pat", "ched"]})
}
sysadmin = ckan_factories.Sysadmin()
result = call_action_api('harvest_source_patch',
apikey=sysadmin['apikey'], **patch_dict)
source_dict.update(patch_dict)
for key in set(('url', 'name', 'title', 'notes', 'source_type',
'frequency', 'config')):
assert_equal(source_dict[key], result[key], "Key: %s" % key)
# Check that source was actually updated
source = harvest_model.HarvestSource.get(result['id'])
assert_equal(source.url, source_dict['url'])
assert_equal(source.type, source_dict['source_type'])
class TestActions(ActionBase):
def test_harvest_source_clear(self):
source = factories.HarvestSourceObj(**SOURCE_DICT.copy())
job = factories.HarvestJobObj(source=source)
dataset = ckan_factories.Dataset()
object_ = factories.HarvestObjectObj(job=job, source=source,
package_id=dataset['id'])
context = {'model': model, 'session': model.Session,
'ignore_auth': True, 'user': ''}
result = toolkit.get_action('harvest_source_clear')(
context, {'id': source.id})
assert_equal(result, {'id': source.id})
source = harvest_model.HarvestSource.get(source.id)
assert source
assert_equal(harvest_model.HarvestJob.get(job.id), None)
assert_equal(harvest_model.HarvestObject.get(object_.id), None)
assert_equal(model.Package.get(dataset['id']), None)
def test_harvest_source_job_history_clear(self):
# prepare
source = factories.HarvestSourceObj(**SOURCE_DICT.copy())
job = factories.HarvestJobObj(source=source)
dataset = ckan_factories.Dataset()
object_ = factories.HarvestObjectObj(job=job, source=source,
package_id=dataset['id'])
# execute
context = {'model': model, 'session': model.Session,
'ignore_auth': True, 'user': ''}
result = toolkit.get_action('harvest_source_job_history_clear')(
context, {'id': source.id})
# verify
assert_equal(result, {'id': source.id})
source = harvest_model.HarvestSource.get(source.id)
assert source
assert_equal(harvest_model.HarvestJob.get(job.id), None)
assert_equal(harvest_model.HarvestObject.get(object_.id), None)
dataset_from_db = model.Package.get(dataset['id'])
assert dataset_from_db, 'is None'
assert_equal(dataset_from_db.id, dataset['id'])
def test_harvest_sources_job_history_clear(self):
# prepare
data_dict = SOURCE_DICT.copy()
source_1 = factories.HarvestSourceObj(**data_dict)
data_dict['name'] = 'another-source'
data_dict['url'] = 'http://another-url'
source_2 = factories.HarvestSourceObj(**data_dict)
job_1 = factories.HarvestJobObj(source=source_1)
dataset_1 = ckan_factories.Dataset()
object_1_ = factories.HarvestObjectObj(job=job_1, source=source_1,
package_id=dataset_1['id'])
job_2 = factories.HarvestJobObj(source=source_2)
dataset_2 = ckan_factories.Dataset()
object_2_ = factories.HarvestObjectObj(job=job_2, source=source_2,
package_id=dataset_2['id'])
# execute
context = {'model': model, 'session': model.Session,
'ignore_auth': True, 'user': ''}
result = toolkit.get_action('harvest_sources_job_history_clear')(
context, {})
# verify
assert_equal(
sorted(result),
sorted([{'id': source_1.id}, {'id': source_2.id}]))
source_1 = harvest_model.HarvestSource.get(source_1.id)
assert source_1
assert_equal(harvest_model.HarvestJob.get(job_1.id), None)
assert_equal(harvest_model.HarvestObject.get(object_1_.id), None)
dataset_from_db_1 = model.Package.get(dataset_1['id'])
assert dataset_from_db_1, 'is None'
assert_equal(dataset_from_db_1.id, dataset_1['id'])
source_2 = harvest_model.HarvestSource.get(source_1.id)
assert source_2
assert_equal(harvest_model.HarvestJob.get(job_2.id), None)
assert_equal(harvest_model.HarvestObject.get(object_2_.id), None)
dataset_from_db_2 = model.Package.get(dataset_2['id'])
assert dataset_from_db_2, 'is None'
assert_equal(dataset_from_db_2.id, dataset_2['id'])
def test_harvest_source_create_twice_with_unique_url(self):
data_dict = SOURCE_DICT.copy()
factories.HarvestSourceObj(**data_dict)
site_user = toolkit.get_action('get_site_user')(
{'model': model, 'ignore_auth': True}, {})['name']
data_dict['name'] = 'another-source'
data_dict['url'] = 'http://another-url'
toolkit.get_action('harvest_source_create')(
{'user': site_user}, data_dict)
def test_harvest_source_create_twice_with_same_url(self):
data_dict = SOURCE_DICT.copy()
factories.HarvestSourceObj(**data_dict)
site_user = toolkit.get_action('get_site_user')(
{'model': model, 'ignore_auth': True}, {})['name']
data_dict['name'] = 'another-source'
assert_raises(toolkit.ValidationError,
toolkit.get_action('harvest_source_create'),
{'user': site_user}, data_dict)
def test_harvest_source_create_twice_with_unique_url_and_config(self):
data_dict = SOURCE_DICT.copy()
factories.HarvestSourceObj(**data_dict)
site_user = toolkit.get_action('get_site_user')(
{'model': model, 'ignore_auth': True}, {})['name']
data_dict['name'] = 'another-source'
data_dict['config'] = '{"something": "new"}'
toolkit.get_action('harvest_source_create')(
{'user': site_user}, data_dict)
def test_harvest_job_create_as_sysadmin(self):
source = factories.HarvestSource(**SOURCE_DICT.copy())
site_user = toolkit.get_action('get_site_user')(
{'model': model, 'ignore_auth': True}, {})['name']
data_dict = {
'source_id': source['id'],
'run': True
}
job = toolkit.get_action('harvest_job_create')(
{'user': site_user}, data_dict)
assert_equal(job['source_id'], source['id'])
assert_equal(job['status'], 'Running')
assert_equal(job['gather_started'], None)
assert_in('stats', job.keys())
def test_harvest_job_create_as_admin(self):
# as if an admin user presses 'refresh'
user = ckan_factories.User()
user['capacity'] = 'admin'
org = ckan_factories.Organization(users=[user])
source_dict = dict(SOURCE_DICT.items() +
[('publisher_id', org['id'])])
source = factories.HarvestSource(**source_dict)
data_dict = {
'source_id': source['id'],
'run': True
}
job = toolkit.get_action('harvest_job_create')(
{'user': user['name']}, data_dict)
assert_equal(job['source_id'], source['id'])
assert_equal(job['status'], 'Running')
assert_equal(job['gather_started'], None)
assert_in('stats', job.keys())
class TestHarvestObject(unittest.TestCase):
@classmethod
def setup_class(cls):
reset_db()
harvest_model.setup()
def test_create(self):
job = factories.HarvestJobObj()
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
}
data_dict = {
'guid': 'guid',
'content': 'content',
'job_id': job.id,
'extras': {'a key': 'a value'},
}
harvest_object = toolkit.get_action('harvest_object_create')(
context, data_dict)
# fetch the object from database to check it was created
created_object = harvest_model.HarvestObject.get(harvest_object['id'])
assert created_object.guid == harvest_object['guid'] == data_dict['guid']
def test_create_bad_parameters(self):
source_a = factories.HarvestSourceObj()
job = factories.HarvestJobObj()
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
}
data_dict = {
'job_id': job.id,
'source_id': source_a.id,
'extras': 1
}
harvest_object_create = toolkit.get_action('harvest_object_create')
self.assertRaises(toolkit.ValidationError, harvest_object_create,
context, data_dict)
data_dict['extras'] = {'test': 1}
self.assertRaises(toolkit.ValidationError, harvest_object_create,
context, data_dict)
class TestHarvestDBLog(unittest.TestCase):
@classmethod
def setup_class(cls):
reset_db()
harvest_model.setup()
def test_harvest_db_logger(self):
# Create source and check if harvest_log table is populated
data_dict = SOURCE_DICT.copy()
data_dict['source_type'] = 'test'
source = factories.HarvestSourceObj(**data_dict)
content = 'Harvest source created: %s' % source.id
log = harvest_model.Session.query(harvest_model.HarvestLog).\
filter(harvest_model.HarvestLog.content==content).first()
self.assertIsNotNone(log)
self.assertEqual(log.level, 'INFO')
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
}
data = toolkit.get_action('harvest_log_list')(context, {})
self.assertTrue(len(data) > 0)
self.assertIn('level', data[0])
self.assertIn('content', data[0])
self.assertIn('created', data[0])
self.assertTrue(data[0]['created'] > data[1]['created'])
per_page = 1
data = toolkit.get_action('harvest_log_list')(context, {'level': 'info', 'per_page': per_page})
self.assertEqual(len(data), per_page)
self.assertEqual(data[0]['level'], 'INFO')
|
agarbuno/deepdish
|
refs/heads/master
|
deepdish/parallel/mpi.py
|
2
|
from __future__ import division, print_function, absolute_import
import sys
import itertools as itr
import numpy as np
__all__ = ['rank', 'imap_unordered', 'imap',
'starmap_unordered', 'starmap', 'main']
# Global set of workers - initialized when a map function is first called
_g_available_workers = None
_g_initialized = False
# For docstrings, see deepdish.parallel.fallback
def rank():
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()
return rank
def kill_workers():
from mpi4py import MPI
all_workers = range(1, MPI.COMM_WORLD.Get_size())
for worker in all_workers:
MPI.COMM_WORLD.send(None, dest=worker, tag=666)
def _init():
global _g_available_workers, _g_initialized
from mpi4py import MPI
import atexit
_g_available_workers = set(range(1, MPI.COMM_WORLD.Get_size()))
_g_initialized = True
atexit.register(kill_workers)
def imap_unordered(f, workloads, star=False):
global _g_available_workers, _g_initialized
from mpi4py import MPI
N = MPI.COMM_WORLD.Get_size() - 1
if N == 0 or not _g_initialized:
mapf = [map, itr.starmap][star]
for res in mapf(f, workloads):
yield res
return
for job_index, workload in enumerate(itr.chain(workloads, itr.repeat(None))):
if workload is None and len(_g_available_workers) == N:
break
while not _g_available_workers or workload is None:
# Wait to receive results
status = MPI.Status()
ret = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
if status.tag == 2:
yield ret['output_data']
_g_available_workers.add(status.source)
if len(_g_available_workers) == N:
break
if _g_available_workers and workload is not None:
dest_rank = _g_available_workers.pop()
# Send off job
task = dict(func=f, input_data=workload, job_index=job_index, unpack=star)
MPI.COMM_WORLD.send(task, dest=dest_rank, tag=10)
def imap(f, workloads, star=False):
global _g_available_workers, _g_initialized
from mpi4py import MPI
N = MPI.COMM_WORLD.Get_size() - 1
if N == 0 or not _g_initialized:
mapf = [map, itr.starmap][star]
for res in mapf(f, workloads):
yield res
return
results = []
indices = []
for job_index, workload in enumerate(itr.chain(workloads, itr.repeat(None))):
if workload is None and len(_g_available_workers) == N:
break
while not _g_available_workers or workload is None:
# Wait to receive results
status = MPI.Status()
ret = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
if status.tag == 2:
results.append(ret['output_data'])
indices.append(ret['job_index'])
_g_available_workers.add(status.source)
if len(_g_available_workers) == N:
break
if _g_available_workers and workload is not None:
dest_rank = _g_available_workers.pop()
# Send off job
task = dict(func=f, input_data=workload, job_index=job_index, unpack=star)
MPI.COMM_WORLD.send(task, dest=dest_rank, tag=10)
II = np.argsort(indices)
for i in II:
yield results[i]
def starmap(f, workloads):
return imap(f, workloads, star=True)
def starmap_unordered(f, workloads):
return imap_unordered(f, workloads, star=True)
def worker():
from mpi4py import MPI
while True:
status = MPI.Status()
ret = MPI.COMM_WORLD.recv(source=0, tag=MPI.ANY_TAG, status=status)
if status.tag == 10:
# Workload received
func = ret['func']
if ret.get('unpack'):
res = func(*ret['input_data'])
else:
res = func(ret['input_data'])
# Done, let's send it back
MPI.COMM_WORLD.send(dict(job_index=ret['job_index'], output_data=res), dest=0, tag=2)
elif status.tag == 666:
# Kill code
sys.exit(0)
def main(name=None):
if name is not None and name != '__main__':
return False
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
_init()
return True
else:
worker()
sys.exit(0)
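# Minimal driver sketch (assumes an MPI launch such as
# `mpirun -n 4 python driver.py`; `square` is a stand-in workload and the
# import path mirrors this module's location):
#
# from deepdish.parallel import mpi as dd_mpi
#
# def square(x):
#     return x * x
#
# if dd_mpi.main(__name__):      # True only on rank 0; workers loop forever
#     for res in dd_mpi.imap(square, range(8)):
#         print(res)             # 0, 1, 4, ..., 49 in submission order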
|
openstack/networking-l2gw
|
refs/heads/master
|
networking_l2gw/tests/unit/services/l2gateway/test_agent_scheduler.py
|
1
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import datetime
from unittest import mock
from oslo_config import cfg
from oslo_service import loopingcall
from oslo_utils import timeutils
from neutron.agent.common import utils
from neutron.db import agents_db
from neutron import manager
from neutron.plugins.ml2 import rpc
from neutron.tests import base
from neutron_lib.agent import topics
from neutron_lib import context as neutron_context
from networking_l2gw.services.l2gateway import agent_scheduler
from networking_l2gw.services.l2gateway.common import constants as srv_const
from networking_l2gw.services.l2gateway.common import topics as l2gw_topics
from networking_l2gw.services.l2gateway.service_drivers import agent_api
def make_active_agent(fake_id, fake_agent_type, config=None):
agent_dict = dict(id=fake_id,
agent_type=fake_agent_type,
host='localhost_' + str(fake_id),
heartbeat_timestamp=timeutils.utcnow(),
started_at=timeutils.utcnow(),
configurations=config)
return agent_dict
def make_inactive_agent(fake_id, fake_agent_type, delta, config=None):
agent_dict = dict(id=fake_id,
agent_type=fake_agent_type,
host='remotehost_' + str(fake_id),
heartbeat_timestamp=timeutils.utcnow() - datetime.timedelta(delta),
configurations=config)
return agent_dict
class FakePlugin(agents_db.AgentDbMixin):
def __init__(self):
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
class TestAgentScheduler(base.BaseTestCase):
fake_a_agent_list = []
fake_i_agent_list = []
def setUp(self):
super(TestAgentScheduler, self).setUp()
cfg.CONF.set_override('core_plugin',
"neutron.plugins.ml2.plugin.Ml2Plugin")
self.plugin = FakePlugin()
self.agent_rpc = agent_api.L2gatewayAgentApi(
l2gw_topics.L2GATEWAY_AGENT, cfg.CONF.host)
self.context = neutron_context.get_admin_context()
cfg.CONF.set_override('agent_down_time', 10)
cfg.CONF.set_override('periodic_monitoring_interval', 5)
self.agentsch = agent_scheduler.L2GatewayAgentScheduler(self.agent_rpc,
cfg.CONF)
self.agentsch._plugin = self.plugin
self.agentsch.context = self.context
self.agentsch.agent_ext_support = True
self.LOG = agent_scheduler.LOG
def populate_agent_lists(self, config=None):
self.fake_a_agent_list = []
self.fake_a_agent_list.append(make_active_agent(
'1000', srv_const.AGENT_TYPE_L2GATEWAY, config))
self.fake_i_agent_list = []
self.fake_i_agent_list.append(make_inactive_agent(
'2000', srv_const.AGENT_TYPE_L2GATEWAY, 52, config))
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(agent_scheduler.LOG, 'debug')
@mock.patch.object(agent_scheduler.LOG, 'error')
def test_initialize_thread(self, err, debug, loop_call):
self.agentsch.initialize_thread()
self.assertTrue(loop_call.called)
self.assertTrue(debug.called)
self.assertFalse(err.called)
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall',
side_effect=RuntimeError)
def test_initialize_thread_loop_call_exception(self, loop_call):
with mock.patch.object(self.LOG, 'error') as log_err:
self.agentsch.initialize_thread()
self.assertTrue(loop_call.called)
self.assertTrue(log_err.called)
@mock.patch.object(manager, 'NeutronManager')
def test_select_agent_type_one_active(self, mgr):
config = {srv_const.L2GW_AGENT_TYPE: ''}
self.populate_agent_lists(config)
with mock.patch.object(self.LOG, 'exception'):
self.agentsch._l2gwplugin = mock.Mock()
self.agentsch._select_agent_type(self.context,
self.fake_a_agent_list)
self.agentsch.l2gwplugin.agent_rpc.set_monitor_agent_called_with(
self.context, self.fake_a_agent_list[0]['host'])
@mock.patch.object(manager, 'NeutronManager')
def test_select_agent_type_multiple_active(self, mgr):
config = {srv_const.L2GW_AGENT_TYPE: ''}
self.populate_agent_lists(config)
self.fake_a_agent_list.append(make_active_agent(
'1001', srv_const.AGENT_TYPE_L2GATEWAY, config))
self.agentsch._l2gwplugin = mock.Mock()
with mock.patch.object(self.LOG, 'exception'):
self.agentsch._select_agent_type(self.context,
self.fake_a_agent_list)
self.agentsch.l2gwplugin.agent_rpc.set_monitor_agent_called_with(
self.context, self.fake_a_agent_list[0]['host'])
def test_monitor_agent_state(self):
config = {srv_const.L2GW_AGENT_TYPE: ''}
self.populate_agent_lists(config)
fake_all_agent_list = copy.deepcopy(self.fake_i_agent_list)
fake_all_agent_list.extend(self.fake_a_agent_list)
self.fake_a_agent_list.append(make_active_agent(
'1001', srv_const.AGENT_TYPE_L2GATEWAY, config))
with mock.patch.object(self.agentsch,
'_select_agent_type') as select_agent, \
mock.patch.object(
self.plugin, 'get_agents',
return_value=fake_all_agent_list) as get_agent_list, \
mock.patch.object(utils, 'is_agent_down',
return_value=False) as is_agt:
self.agentsch.monitor_agent_state()
self.assertTrue(get_agent_list.called)
self.assertTrue(select_agent.called)
self.assertTrue(is_agt.called)
def test_monitor_agent_state_exception_get_agents(self):
with mock.patch.object(
self.plugin, 'get_agents',
side_effect=Exception) as get_agent_list, \
mock.patch.object(self.LOG, 'exception') as exception_log:
self.agentsch.monitor_agent_state()
self.assertTrue(get_agent_list.called)
self.assertTrue(exception_log.called)
|
MXWXZ/DuiMini
|
refs/heads/master
|
thirdpart/googletest/googletest/test/googletest-param-test-invalid-name1-test.py
|
122
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
import gtest_test_utils
binary_name = 'googletest-param-test-invalid-name1-test_'
COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name)
def Assert(condition):
if not condition:
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
err = ('Parameterized test name \'"InvalidWithQuotes"\' is invalid')
p = gtest_test_utils.Subprocess(command)
Assert(p.terminated_by_signal)
# Verify the output message contains appropriate output
Assert(err in p.output)
class GTestParamTestInvalidName1Test(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
|
stanchan/jenkins-job-builder
|
refs/heads/master
|
tests/macros/__init__.py
|
12133432
| |
pepetreshere/odoo
|
refs/heads/patch-2
|
addons/website/models/website_rewrite.py
|
9
|
from odoo import models, fields, api, _
from odoo.exceptions import AccessDenied, ValidationError
import logging
_logger = logging.getLogger(__name__)
class WebsiteRoute(models.Model):
_rec_name = 'path'
_name = 'website.route'
_description = "All Website Route"
_order = 'path'
path = fields.Char('Route')
@api.model
def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):
res = super(WebsiteRoute, self)._name_search(name=name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
if not len(res):
self._refresh()
return super(WebsiteRoute, self)._name_search(name=name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
return res
def _refresh(self):
_logger.debug("Refreshing website.route")
ir_http = self.env['ir.http']
tocreate = []
paths = {rec.path: rec for rec in self.search([])}
for url, _, routing in ir_http._generate_routing_rules(self.pool._init_modules, converters=ir_http._get_converters()):
if 'GET' in (routing.get('methods') or ['GET']):
if paths.get(url):
paths.pop(url)
else:
tocreate.append({'path': url})
if tocreate:
_logger.info("Add %d website.route" % len(tocreate))
self.create(tocreate)
if paths:
find = self.search([('path', 'in', list(paths.keys()))])
_logger.info("Delete %d website.route" % len(find))
find.unlink()
class WebsiteRewrite(models.Model):
_name = 'website.rewrite'
_description = "Website rewrite"
name = fields.Char('Name', required=True)
website_id = fields.Many2one('website', string="Website", ondelete='cascade', index=True)
active = fields.Boolean(default=True)
url_from = fields.Char('URL from', index=True)
route_id = fields.Many2one('website.route')
url_to = fields.Char("URL to")
redirect_type = fields.Selection([
('404', '404 Not Found'),
('301', '301 Moved permanently'),
('302', '302 Moved temporarily'),
('308', '308 Redirect / Rewrite'),
], string='Action', default="302",
        help='''Type of redirect/rewrite:\n
        301 Moved permanently: the browser will cache the new URL.
        302 Moved temporarily: the browser will not cache the new URL and will request it again next time.
        404 Not Found: use this to remove a specific page/controller (e.g. eCommerce is installed, but you don't want /shop on a specific website).
        308 Redirect / Rewrite: use this to rename a controller with a new URL (e.g. /shop -> /garden - both URLs remain accessible, but /shop is automatically redirected to /garden).
        ''')
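    # Illustrative usage (hypothetical values), e.g. from an Odoo shell:
    #   env['website.rewrite'].create({
    #       'name': 'Shop to Garden',
    #       'redirect_type': '308',
    #       'url_from': '/shop',
    #       'url_to': '/garden',
    #   })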
sequence = fields.Integer()
@api.onchange('route_id')
def _onchange_route_id(self):
self.url_from = self.route_id.path
self.url_to = self.route_id.path
@api.constrains('url_to', 'redirect_type')
def _check_url_to(self):
for rewrite in self:
if rewrite.redirect_type == '308':
if not rewrite.url_to:
raise ValidationError(_('"URL to" can not be empty.'))
elif not rewrite.url_to.startswith('/'):
raise ValidationError(_('"URL to" must start with a leading slash.'))
def name_get(self):
result = []
for rewrite in self:
name = "%s - %s" % (rewrite.redirect_type, rewrite.name)
result.append((rewrite.id, name))
return result
@api.model
def create(self, vals):
res = super(WebsiteRewrite, self).create(vals)
self._invalidate_routing()
return res
def write(self, vals):
res = super(WebsiteRewrite, self).write(vals)
self._invalidate_routing()
return res
def unlink(self):
res = super(WebsiteRewrite, self).unlink()
self._invalidate_routing()
return res
def _invalidate_routing(self):
# call clear_caches on this worker to reload routing table
self.env['ir.http'].clear_caches()
def refresh_routes(self):
self.env['website.route']._refresh()
|
anryko/ansible
|
refs/heads/devel
|
test/units/modules/network/fortios/test_fortios_facts.py
|
20
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from units.modules.utils import exit_json, fail_json
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.module_utils.network.fortios.facts.facts import Facts
from ansible.modules.network.fortios import fortios_facts
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_facts.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
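# Note: FortiOSHandler is instantiated with the fixture function itself as a
# stand-in connection object; the connection is never exercised directly here
# because the `monitor` method is patched inside the test.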
def test_facts_get(mocker):
monitor_method_result = {'status': 'success', 'http_method': 'GET', 'http_status': 200}
monitor_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.monitor', return_value=monitor_method_result)
mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
mock_module._connection = connection_mock
# test case 01, args with single gather_subset
args = {
'vdom': 'root',
'gather_subset': [
{'fact': 'system_status_select'},
]
}
mock_module.params = args
response, ignore = Facts(mock_module, fos_instance).get_facts()
monitor_method_mock.assert_called_with('system', 'status/select', vdom='root')
assert response['ansible_network_resources']['system_status_select']['status'] == 'success'
assert response['ansible_network_resources']['system_status_select']['http_status'] == 200
# test case 02, args with single gather_subset with filters
args = {
'vdom': 'root',
'gather_subset': [
{'fact': 'system_interface_select', 'filters': [{'include_vlan': 'true'}, {'interface_name': 'port3'}]},
]
}
mock_module.params = args
response, ignore = Facts(mock_module, fos_instance).get_facts()
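    # The filters are expected to be serialized into the URL query string,
    # with vdom folded into it instead of being passed as a keyword argument.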
monitor_method_mock.assert_called_with('system', 'interface/select?vdom=root&include_vlan=true&interface_name=port3', vdom=None)
assert response['ansible_network_resources']['system_interface_select']['status'] == 'success'
assert response['ansible_network_resources']['system_interface_select']['http_status'] == 200
# test case 03, args with multiple gather_subset
args = {
'vdom': 'root',
'gather_subset': [
{'fact': 'system_current-admins_select'},
{'fact': 'system_firmware_select'},
{'fact': 'system_fortimanager_status'},
{'fact': 'system_ha-checksums_select'},
]
}
mock_module.params = args
response, ignore = Facts(mock_module, fos_instance).get_facts()
monitor_method_mock.assert_any_call('system', 'current-admins/select', vdom='root')
monitor_method_mock.assert_any_call('system', 'firmware/select', vdom='root')
monitor_method_mock.assert_any_call('system', 'fortimanager/status', vdom='root')
monitor_method_mock.assert_any_call('system', 'ha-checksums/select', vdom='root')
assert response['ansible_network_resources']['system_ha-checksums_select']['status'] == 'success'
assert response['ansible_network_resources']['system_ha-checksums_select']['http_status'] == 200
|