| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
from __future__ import unicode_literals
try:
import unittest2 as unittest
except ImportError:
import unittest
from rpaths import unicode, PY3, AbstractPath, PosixPath, WindowsPath
class TestAbstract(unittest.TestCase):
def test_construct(self):
"""Tests building an AbstractPath."""
with self.assertRaises(RuntimeError):
AbstractPath('path/to/something')
class TestWindows(unittest.TestCase):
"""Tests for WindowsPath.
"""
def test_construct(self):
"""Tests building paths."""
self.assertEqual(WindowsPath('C:\\',
WindowsPath('some/dir'),
'with',
'files.txt').path,
'C:\\some\\dir\\with\\files.txt')
with self.assertRaises(TypeError):
WindowsPath(WindowsPath('C:\\somedir'), PosixPath('file.sh'))
self.assertEqual((WindowsPath('Users\\R\xE9mi/Desktop') /
WindowsPath(b'pictures/m\xE9chant.jpg')).path,
'Users\\R\xE9mi\\Desktop\\pictures\\m\xE9chant.jpg')
self.assertEqual((WindowsPath('C:\\dir') /
WindowsPath('D:\\other')).path,
'D:\\other')
def test_plus(self):
"""Tests the plus operator."""
self.assertEqual((WindowsPath('some\\file.txt') + '.bak').path,
'some\\file.txt.bak')
with self.assertRaises(TypeError):
WindowsPath('some\\file.txt') + WindowsPath('.bak')
with self.assertRaises(ValueError):
WindowsPath('some\\file.txt') + '.bak/kidding'
with self.assertRaises(ValueError):
WindowsPath('some\\file.txt') + '/backup'
def test_str(self):
"""Tests getting string representations (repr/bytes/unicode)."""
latin = WindowsPath('C:\\r\xE9mi')
nonlatin = WindowsPath('C:\\you like\u203D.txt')
# repr()
self.assertEqual(repr(latin),
"WindowsPath(u'C:\\\\r\\xe9mi')")
self.assertEqual(repr(nonlatin),
"WindowsPath(u'C:\\\\you like\\u203d.txt')")
# bytes()
self.assertEqual(bytes(latin),
b'C:\\r\xe9mi')
self.assertEqual(bytes(nonlatin),
b'C:\\you like?.txt')
# unicode()
self.assertEqual(unicode(latin),
'C:\\r\xe9mi')
self.assertEqual(unicode(nonlatin),
'C:\\you like\u203d.txt')
def test_parts(self):
"""Tests parent, ancestor, name, stem, ext."""
relative = WindowsPath('directory/users\\r\xE9mi/file.txt')
absolute = WindowsPath('\\some/other\\thing.h\xE9h\xE9')
self.assertEqual(relative.parent.path,
'directory\\users\\r\xE9mi')
self.assertEqual(absolute.parent.path,
'\\some\\other')
self.assertEqual(absolute.ancestor(10).path,
'\\')
self.assertEqual(relative.name, 'file.txt')
self.assertEqual(absolute.name, 'thing.h\xE9h\xE9')
self.assertEqual(absolute.unicodename, 'thing.h\xE9h\xE9')
self.assertEqual(absolute.stem, 'thing')
self.assertEqual(absolute.ext, '.h\xE9h\xE9')
self.assertEqual(relative._components(),
['directory', 'users', 'r\xE9mi', 'file.txt'])
self.assertEqual(absolute._components(),
['\\', 'some', 'other', 'thing.h\xE9h\xE9'])
def test_root(self):
"""Tests roots, drives and UNC shares."""
a = WindowsPath(b'some/relative/path')
b = WindowsPath('alsorelative')
c = WindowsPath(b'/this/is/absolute')
d = WindowsPath('C:\\')
e = WindowsPath(b'C:\\also/absolute')
f = WindowsPath('\\\\SOMEMACHINE\\share\\some\\file')
def split_root(f):
return tuple(p.path for p in f.split_root())
self.assertEqual(split_root(a),
('.', 'some\\relative\\path'))
self.assertEqual(split_root(b),
('.', 'alsorelative'))
self.assertFalse(b.is_absolute)
self.assertEqual(split_root(c),
('\\', 'this\\is\\absolute'))
self.assertTrue(c.is_absolute)
self.assertEqual(split_root(d),
('C:\\', '.'))
self.assertTrue(d.is_absolute)
self.assertEqual(d.root.path, 'C:\\')
self.assertEqual(split_root(e),
('C:\\', 'also\\absolute'))
# FIXME : normpath() doesn't behave consistently: puts \ at the end on
# PY3, not on PY2.
self.assertIn(split_root(f),
[('\\\\SOMEMACHINE\\share', 'some\\file'),
('\\\\SOMEMACHINE\\share\\', 'some\\file')])
def test_rel_path_to(self):
"""Tests the rel_path_to method."""
self.assertEqual(WindowsPath('.').rel_path_to(WindowsPath('')).path,
'.')
self.assertEqual(WindowsPath('\\var\\log\\apache2\\').rel_path_to(
'\\var\\www\\cat.jpg').path,
'..\\..\\www\\cat.jpg')
self.assertEqual(WindowsPath('C:\\var\\log\\apache2\\').rel_path_to(
'C:\\tmp\\access.log').path,
'..\\..\\..\\tmp\\access.log')
self.assertEqual(WindowsPath('var\\log').rel_path_to(
'var\\log\\apache2\\access.log').path,
'apache2\\access.log')
self.assertEqual(WindowsPath('\\var\\log\\apache2').rel_path_to(
'\\var\\log\\apache2').path,
'.')
self.assertEqual(WindowsPath('C:\\').rel_path_to(
'C:\\var\\log\\apache2\\access.log').path,
'var\\log\\apache2\\access.log')
self.assertEqual(WindowsPath('\\tmp\\secretdir\\').rel_path_to(
'\\').path,
'..\\..')
self.assertEqual(WindowsPath('C:\\tmp\\secretdir\\').rel_path_to(
'D:\\other\\file.txt').path,
'D:\\other\\file.txt')
with self.assertRaises(TypeError):
WindowsPath('C:\\mydir\\').rel_path_to(PosixPath('/tmp/file'))
def test_lies_under(self):
"""Tests the lies_under method."""
self.assertTrue(WindowsPath('\\tmp')
.lies_under('\\'))
self.assertFalse(WindowsPath('C:\\tmp')
.lies_under('C:\\var'))
self.assertFalse(WindowsPath('\\tmp')
.lies_under('C:\\tmp'))
self.assertFalse(WindowsPath('C:\\')
.lies_under('D:\\tmp'))
self.assertTrue(WindowsPath('\\tmp\\some\\file\\here')
.lies_under('\\tmp\\some'))
self.assertFalse(WindowsPath('\\tmp\\some\\file\\here')
.lies_under('\\tmp\\no'))
self.assertFalse(WindowsPath('C:\\tmp\\some\\file\\here')
.lies_under('C:\\no\\tmp\\some'))
self.assertFalse(WindowsPath('\\tmp\\some\\file\\here')
.lies_under('\\no\\some'))
self.assertTrue(WindowsPath('C:\\tmp\\some\\file\\here')
.lies_under('C:\\tmp\\some\\file\\here'))
self.assertTrue(WindowsPath('\\')
.lies_under('\\'))
self.assertTrue(WindowsPath('')
.lies_under(''))
self.assertTrue(WindowsPath('test')
.lies_under(''))
self.assertFalse(WindowsPath('')
.lies_under('test'))
self.assertFalse(WindowsPath('test')
.lies_under('\\'))
def test_comparisons(self):
"""Tests the comparison operators."""
self.assertTrue(WindowsPath('\\tmp') == WindowsPath('\\tmp'))
self.assertFalse(WindowsPath('C:\\file') != 'c:\\FILE')
self.assertTrue('c:\\FILE' == WindowsPath('C:\\file'))
self.assertFalse(WindowsPath('C:\\file') == WindowsPath('C:\\dir'))
self.assertFalse(WindowsPath('some/file') == PosixPath('some/file'))
self.assertTrue(WindowsPath('path/to/file1') < 'path/to/file2')
self.assertFalse('path/to/file1' >= WindowsPath('path/to/file2'))
if PY3:
with self.assertRaises(TypeError):
WindowsPath('some/file') < PosixPath('other/file')
class TestPosix(unittest.TestCase):
"""Tests for PosixPath.
"""
def test_construct(self):
"""Tests building paths."""
self.assertEqual(PosixPath('/',
PosixPath(b'r\xE9mis/dir'),
'with',
'files.txt').path,
b'/r\xE9mis/dir/with/files.txt')
with self.assertRaises(TypeError):
PosixPath('/tmp/test', WindowsPath('folder'), 'cat.gif')
self.assertEqual((PosixPath(b'/tmp/dir') /
PosixPath('r\xE9mis/files/')).path,
b'/tmp/dir/r\xC3\xA9mis/files')
if PY3:
self.assertEqual(PosixPath('/tmp/r\uDCE9mi').path,
b'/tmp/r\xE9mi')
self.assertEqual((PosixPath(b'/home/test') /
PosixPath('/var/log')).path,
b'/var/log')
def test_plus(self):
"""Tests the plus operator."""
self.assertEqual((PosixPath('some/file.txt') + '.bak').path,
b'some/file.txt.bak')
with self.assertRaises(TypeError):
PosixPath('some/file.txt') + PosixPath('.bak')
with self.assertRaises(ValueError):
PosixPath('some/file.txt') + '.bak/kidding'
with self.assertRaises(ValueError):
PosixPath('some/file.txt') + '/backup'
def test_str(self):
"""Tests getting string representations (repr/bytes/unicode)."""
utf = PosixPath(b'/tmp/r\xC3\xA9mi')
nonutf = PosixPath(b'/tmp/r\xE9mi')
# repr()
self.assertEqual(repr(utf),
"PosixPath(b'/tmp/r\\xc3\\xa9mi')")
self.assertEqual(repr(nonutf),
"PosixPath(b'/tmp/r\\xe9mi')")
# bytes()
self.assertEqual(bytes(utf),
b'/tmp/r\xC3\xA9mi')
self.assertEqual(bytes(nonutf),
b'/tmp/r\xE9mi')
# unicode()
self.assertEqual(unicode(utf),
'/tmp/r\xE9mi')
self.assertEqual(unicode(nonutf),
'/tmp/r\uDCE9mi' if PY3 else '/tmp/r\uFFFDmi')
def test_parts(self):
"""Tests parent, ancestor, name, stem, ext."""
relative = PosixPath(b'directory/users/r\xE9mi/file.txt')
absolute = PosixPath('/some/other/thing.h\xE9h\xE9')
self.assertEqual(relative.parent.path,
b'directory/users/r\xE9mi')
self.assertEqual(absolute.parent.path,
b'/some/other')
self.assertEqual(absolute.ancestor(10).path,
b'/')
self.assertEqual(relative.name, b'file.txt')
self.assertEqual(absolute.name, b'thing.h\xC3\xA9h\xC3\xA9')
self.assertEqual(absolute.unicodename, 'thing.h\xE9h\xE9')
self.assertEqual(absolute.stem, b'thing')
self.assertEqual(absolute.ext, b'.h\xC3\xA9h\xC3\xA9')
self.assertEqual(relative._components(),
[b'directory', b'users', b'r\xE9mi', b'file.txt'])
self.assertEqual(absolute._components(),
[b'/', b'some',
b'other', b'thing.h\xC3\xA9h\xC3\xA9'])
def test_root(self):
"""Tests roots."""
a = PosixPath(b'some/relative/path')
b = PosixPath('alsorelative')
c = PosixPath(b'/this/is/absolute')
d = PosixPath('/')
def split_root(f):
return tuple(p.path for p in f.split_root())
# FIXME : This behaves weirdly because of normpath(). Do we want this?
self.assertEqual(split_root(a),
(b'.', b'some/relative/path'))
self.assertEqual(split_root(b),
(b'.', b'alsorelative'))
self.assertFalse(b.is_absolute)
self.assertEqual(split_root(c),
(b'/', b'this/is/absolute'))
self.assertTrue(c.is_absolute)
self.assertEqual(split_root(d),
(b'/', b'.'))
self.assertTrue(d.is_absolute)
self.assertEqual(d.root.path, b'/')
def test_rel_path_to(self):
"""Tests the rel_path_to method."""
self.assertEqual(PosixPath('.').rel_path_to(PosixPath('')).path,
b'.')
self.assertEqual(PosixPath(b'/var/log/apache2/').rel_path_to(
b'/var/www/cat.jpg').path,
b'../../www/cat.jpg')
self.assertEqual(PosixPath(b'/var/log/apache2/').rel_path_to(
b'/tmp/access.log').path,
b'../../../tmp/access.log')
self.assertEqual(PosixPath(b'var/log').rel_path_to(
b'var/log/apache2/access.log').path,
b'apache2/access.log')
self.assertEqual(PosixPath(b'/var/log/apache2').rel_path_to(
b'/var/log/apache2').path,
b'.')
self.assertEqual(PosixPath(b'/').rel_path_to(
b'/var/log/apache2/access.log').path,
b'var/log/apache2/access.log')
self.assertEqual(PosixPath(b'/tmp/secretdir/').rel_path_to(
b'/').path,
b'../..')
def test_lies_under(self):
""" Tests the lies_under method."""
self.assertTrue(PosixPath(b'/tmp')
.lies_under(b'/'))
self.assertFalse(PosixPath(b'/tmp')
.lies_under(b'/var'))
self.assertTrue(PosixPath(b'/tmp/some/file/here')
.lies_under(b'/tmp/some'))
self.assertFalse(PosixPath(b'/tmp/some/file/here')
.lies_under(b'/tmp/no'))
self.assertFalse(PosixPath(b'/tmp/some/file/here')
.lies_under(b'/no/tmp/some'))
self.assertFalse(PosixPath(b'/tmp/some/file/here')
.lies_under(b'/no/some'))
self.assertTrue(PosixPath(b'/tmp/some/file/here')
.lies_under(b'/tmp/some/file/here'))
self.assertTrue(PosixPath(b'/')
.lies_under(b'/'))
self.assertTrue(PosixPath(b'')
.lies_under(b''))
self.assertTrue(PosixPath(b'test')
.lies_under(b''))
self.assertFalse(PosixPath(b'')
.lies_under(b'test'))
self.assertFalse(PosixPath(b'test')
.lies_under(b'/'))
def test_comparisons(self):
"""Tests the comparison operators."""
self.assertTrue(PosixPath(b'/tmp/r\xE9mi') == b'/tmp/r\xE9mi')
self.assertTrue(PosixPath(b'/file') != b'/FILE')
self.assertFalse(PosixPath(b'file') == PosixPath(b'dir'))
self.assertFalse(WindowsPath('some/file') == PosixPath('some/file'))
self.assertTrue(PosixPath(b'path/to/file1') < b'path/to/file2')
self.assertFalse(b'path/to/file1' >= PosixPath(b'path/to/file2'))
if PY3:
with self.assertRaises(TypeError):
WindowsPath('some/file') < PosixPath('other/file')
| remram44/rpaths | tests/test_abstract.py | Python | bsd-3-clause | 15,818 |
import unittest
from exporters.readers.base_reader import BaseReader
from exporters.readers.random_reader import RandomReader
from .utils import meta
class BaseReaderTest(unittest.TestCase):
def setUp(self):
self.reader = BaseReader({}, meta())
def test_get_next_batch_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.reader.get_next_batch()
def test_set_last_position(self):
self.reader.set_last_position(dict(position=5))
self.assertEqual(self.reader.last_position, dict(position=5))
class RandomReaderTest(unittest.TestCase):
def setUp(self):
self.options = {
'exporter_options': {
'log_level': 'DEBUG',
'logger_name': 'export-pipeline'
},
'reader': {
'name': 'exporters.readers.random_reader.RandomReader',
'options': {
'number_of_items': 1000,
'batch_size': 100
}
},
}
self.reader = RandomReader(self.options, meta())
self.reader.set_last_position(None)
def test_get_next_batch(self):
batch = list(self.reader.get_next_batch())
self.assertEqual(len(batch), self.options['reader']['options']['batch_size'])
def test_get_second_batch(self):
self.reader.get_next_batch()
batch = list(self.reader.get_next_batch())
self.assertEqual(len(batch), self.options['reader']['options']['batch_size'])
self.assertEqual(self.reader.get_metadata('read_items'),
self.options['reader']['options']['batch_size'])
def test_get_all(self):
total_items = 0
while not self.reader.finished:
batch = list(self.reader.get_next_batch())
total_items += len(batch)
self.assertEqual(total_items, self.options['reader']['options']['number_of_items'])
def test_set_last_position_none(self):
self.reader.set_last_position({'last_read': 123})
self.assertEqual({'last_read': 123}, self.reader.last_position)
| scrapinghub/exporters | tests/test_readers.py | Python | bsd-3-clause | 2,129 |
"""Accessors for Amber TI datasets.
"""
from os.path import dirname, join
from glob import glob
from .. import Bunch
def load_bace_improper():
"""Load Amber Bace improper solvated vdw example
Returns
-------
data: Bunch
Dictionary-like object, the interesting attributes are:
- 'data' : the data files for improper solvated vdw alchemical leg
- 'DESCR': the full description of the dataset
"""
module_path = dirname(__file__)
data = {'vdw': glob(join(module_path, 'bace_improper/solvated/vdw/*/ti-*.out.bz2'))}
with open(join(module_path, 'bace_improper', 'descr.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
DESCR=fdescr)
def load_bace_example():
"""Load Amber Bace example perturbation.
Returns
-------
data: Bunch
Dictionary-like object, the interesting attributes are:
- 'data' : the data files by system and alchemical leg
- 'DESCR': the full description of the dataset
"""
module_path = dirname(__file__)
data = {'complex':
{'decharge': glob(join(module_path, 'bace_CAT-13d~CAT-17a/complex/decharge/*/ti-*.out.bz2')),
'recharge': glob(join(module_path, 'bace_CAT-13d~CAT-17a/complex/recharge/*/ti-*.out.bz2')),
'vdw': glob(join(module_path, 'bace_CAT-13d~CAT-17a/complex/vdw/*/ti-*.out.bz2'))
},
'solvated':
{'decharge': glob(join(module_path, 'bace_CAT-13d~CAT-17a/solvated/decharge/*/ti-*.out.bz2')),
'recharge': glob(join(module_path, 'bace_CAT-13d~CAT-17a/solvated/recharge/*/ti-*.out.bz2')),
'vdw': glob(join(module_path, 'bace_CAT-13d~CAT-17a/solvated/vdw/*/ti-*.out.bz2'))
}
}
with open(join(module_path, 'bace_CAT-13d~CAT-17a', 'descr.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
DESCR=fdescr)
def load_simplesolvated():
"""Load the Amber solvated dataset.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
- 'data' : the data files by alchemical leg
- 'DESCR': the full description of the dataset
"""
module_path = dirname(__file__)
data = {'charge': glob(join(module_path, 'simplesolvated/charge/*/ti-*.out')),
'vdw': glob(join(module_path, 'simplesolvated/vdw/*/ti-*.out'))}
with open(join(module_path, 'simplesolvated', 'descr.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
DESCR=fdescr)
def load_invalidfiles():
"""Load the invalid files.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
- 'data' : the example of invalid data files
- 'DESCR': the full description of the dataset
"""
module_path = dirname(__file__)
data = [glob(join(module_path, 'invalidfiles/*.out.bz2'))]
with open(join(module_path, 'invalidfiles', 'descr.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
DESCR=fdescr)
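# Usage sketch (illustrative, assuming the bundled data files are installed
# with the package): load a dataset and inspect its contents.
#
#     from alchemtest.amber import load_bace_example
#     bunch = load_bace_example()
#     print(bunch['DESCR'])                        # full dataset description
#     vdw_files = bunch['data']['complex']['vdw']  # list of ti-*.out.bz2 paths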
| alchemistry/alchemtest | src/alchemtest/amber/access.py | Python | bsd-3-clause | 3,102 |
import sys
from django.core.management.base import BaseCommand
from ietf.community.constants import SIGNIFICANT_STATES
from ietf.community.models import DocumentChangeDates
from ietf.doc.models import Document
class Command(BaseCommand):
help = (u"Update drafts in community lists by reviewing their rules")
def handle(self, *args, **options):
documents = Document.objects.filter(type='draft')
index = 1
total = documents.count()
for doc in documents.iterator():
(changes, created) = DocumentChangeDates.objects.get_or_create(document=doc)
new_version = doc.latest_event(type='new_revision')
normal_change = doc.latest_event()
significant_change = None
for event in doc.docevent_set.filter(type='changed_document'):
for state in SIGNIFICANT_STATES:
if ('<b>%s</b>' % state) in event.desc:
significant_change = event
break
changes.new_version_date = new_version and new_version.time.date()
changes.normal_change_date = normal_change and normal_change.time.date()
changes.significant_change_date = significant_change and significant_change.time.date()
changes.save()
sys.stdout.write('Document %s/%s\r' % (index, total))
sys.stdout.flush()
index += 1
print
| wpjesus/codematch | ietf/community/management/commands/update_doc_change_dates.py | Python | bsd-3-clause | 1,440 |
"""
Utilities and helper functions
"""
def get_object_or_none(model, **kwargs):
try:
return model.objects.get(**kwargs)
except model.DoesNotExist:
return None
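# Usage sketch (``Currency`` is a hypothetical model; assumes Django is
# configured): fetch a row without a try/except at the call site.
#
#     from exchange.models import Currency
#     currency = get_object_or_none(Currency, code='USD')
#     if currency is None:
#         pass  # handle the missing row here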
| chhantyal/exchange | uhura/exchange/utils.py | Python | bsd-3-clause | 183 |
""" Utility module to determine the OS Python running on
--------------------------------------------------------------------------
File: utilsOsType.py
Overview: Python module to supply functions and an enumeration to
help determine the platform type, bit size and OS currently
being used.
--------------------------------------------------------------------------
"""
# Python modules:
import sys # Provide system information
# Third party modules:
# In-house modules:
# Instantiations:
# Enumerations:
#-----------------------------------------------------------------------------
# Details: Class to implement a 'C' style enumeration type.
# Gotchas: None.
# Authors: Illya Rudkin 28/11/2013.
# Changes: None.
#--
if sys.version_info.major >= 3:
from enum import Enum
class EnumOsType(Enum):
Unknown = 0
Darwin = 1
FreeBSD = 2
Linux = 3
NetBSD = 4
OpenBSD = 5
Windows = 6
kFreeBSD = 7
else:
class EnumOsType(object):
values = ["Unknown",
"Darwin",
"FreeBSD",
"Linux",
"NetBSD",
"OpenBSD",
"Windows",
"kFreeBSD"]
class __metaclass__(type):
#++----------------------------------------------------------------
# Details: Fn acts as an enumeration.
# Args: vName - (R) Enumeration to match.
# Returns: Int - Matching enumeration/index.
# Throws: None.
#--
def __getattr__(cls, vName):
return cls.values.index(vName)
#++---------------------------------------------------------------------------
# Details: Reverse fast lookup of the values list.
# Args: vI - (R) Index / enumeration.
# Returns: Str - text description matching enumeration.
# Throws: None.
#--
def name_of(cls, vI):
return EnumOsType.values[vI]
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#++---------------------------------------------------------------------------
# Details: Determine what operating system is currently running on.
# Args: None.
# Returns: EnumOsType - The OS type being used ATM.
# Throws: None.
#--
def determine_os_type():
eOSType = EnumOsType.Unknown
strOS = sys.platform
if strOS == "darwin":
eOSType = EnumOsType.Darwin
elif strOS.startswith("freebsd"):
eOSType = EnumOsType.FreeBSD
elif strOS.startswith("linux"):
eOSType = EnumOsType.Linux
elif strOS.startswith("netbsd"):
eOSType = EnumOsType.NetBSD
elif strOS.startswith("openbsd"):
eOSType = EnumOsType.OpenBSD
elif strOS == "win32":
eOSType = EnumOsType.Windows
elif strOS.startswith("gnukfreebsd"):
eOSType = EnumOsType.kFreeBSD
return eOSType
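# Usage sketch (a minimal example):
#
#     eOSType = determine_os_type()
#     if eOSType == EnumOsType.Windows:
#         pass  # take Windows-specific action here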
| youtube/cobalt | third_party/llvm-project/lldb/scripts/utilsOsType.py | Python | bsd-3-clause | 3,130 |
#!/usr/bin/env python2
from taptaptap.proc import plan, ok, not_ok, out
plan(first=1, last=13)
ok('Starting the program')
ok('Starting the engine')
ok('Find the object')
ok('Grab it', todo=True)
ok('Use it', todo=True)
2 * 2 == 4 and ok('2 * 2 == 4') or not_ok('2 * 2 != 4')
out()
## validity: -1
## ok testcases: 6 / 13
## bailout: no
## stderr: 2 * 2 == 4
## stderr: TODO
## stderr: ~TRUE
## stderr: ~True
## stderr: ~true
| meisterluk/taptaptap | tests/proc_005.py | Python | bsd-3-clause | 472 |
import logging
import matplotlib as mpl
from .tools import get_figure_size
_logger = logging.getLogger("mpl_settings")
orig_settings = {**mpl.rcParams}
latex_settings = {
# change this if using contex, xetex or lualatex
"pgf.texsystem": "pdflatex",
# use LaTeX to write all text
"text.usetex": True,
'font.family': 'lmodern',
# blank entries should cause plots to inherit fonts from the document
# "font.serif": [],
# "font.sans-serif": [],
# "font.monospace": [],
# "text.fontsize": 11,
"legend.fontsize": 9, # Make the legend/label fonts a little smaller
"xtick.labelsize": 9,
"ytick.labelsize": 9,
"figure.figsize": get_figure_size(1), # default fig size of 1\textwidth
"lines.linewidth": 0.5,
"axes.labelsize": 11, # LaTeX default is 10pt font.
"axes.linewidth": 0.5,
"axes.unicode_minus": False,
# subfig related
"figure.subplot.left": 0.1,
"figure.subplot.right": 0.95,
"figure.subplot.bottom": 0.125,
"figure.subplot.top": 0.95,
# the amount of width reserved for blank space between subplots
"figure.subplot.wspace": 0.4,
# the amount of height reserved for white space between subplots
"figure.subplot.hspace": 0.4,
# Patches are graphical objects that fill 2D space, like polygons or circles
"patch.linewidth": 0.5,
}
def enable_latex():
_logger.info("LaTeX export enabled")
mpl.rcParams['text.latex.preamble'].append(r'\usepackage{lmodern}')
mpl.rcParams['text.latex.preamble'].append(r'\usepackage{siunitx}')
mpl.rcParams.update(latex_settings)
def disable_latex():
_logger.info("LaTeX export disabled")
mpl.rcParams.update(orig_settings)
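# Usage sketch (a minimal example): enable the LaTeX settings around figure
# creation, then restore the defaults that were captured at import time.
#
#     import matplotlib.pyplot as plt
#     enable_latex()
#     fig, ax = plt.subplots()
#     ax.plot([0, 1], [0, 1])
#     fig.savefig('plot.pgf')
#     disable_latex()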
| cklb/PyMoskito | pymoskito/mpl_settings.py | Python | bsd-3-clause | 1,697 |
{% if cookiecutter.use_celery == 'y' %}
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('{{cookiecutter.project_slug}}')
class CeleryConfig(AppConfig):
name = '{{cookiecutter.project_slug}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
{% if cookiecutter.use_sentry_for_error_reporting == 'y' -%}
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
{%- endif %}
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
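# Usage sketch: with a worker running, the task can be queued from anywhere
# in the project; the worker then prints the request metadata.
#
#     debug_task.delay()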
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
| asyncee/cookiecutter-django | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/taskapp/celery.py | Python | bsd-3-clause | 1,701 |
import numpy as np
def multiprod(A, B):
"""
Inspired by MATLAB multiprod function by Paolo de Leva. A and B are
assumed to be arrays containing M matrices, that is, A and B have
dimensions A: (M, N, P), B:(M, P, Q). multiprod multiplies each matrix
in A with the corresponding matrix in B, using matrix multiplication.
so multiprod(A, B) has dimensions (M, N, Q).
"""
# First check if we have been given just one matrix
if len(np.shape(A)) == 2:
return np.dot(A, B)
# Old (slower) implementation:
# a = A.reshape(np.hstack([np.shape(A), [1]]))
# b = B.reshape(np.hstack([[np.shape(B)[0]], [1], np.shape(B)[1:]]))
# return np.sum(a * b, axis=2)
# Approx 5x faster, only supported by numpy version >= 1.6:
return np.einsum('ijk,ikl->ijl', A, B)
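# A minimal sketch of multiprod on a stack of matrices (the shapes are the
# point, not the values):
#
#     A = np.random.randn(10, 3, 4)  # M=10 matrices of shape (3, 4)
#     B = np.random.randn(10, 4, 2)  # M=10 matrices of shape (4, 2)
#     C = multiprod(A, B)            # shape (10, 3, 2); C[i] == A[i] @ B[i]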
def multitransp(A):
"""
Inspired by MATLAB multitransp function by Paolo de Leva. A is assumed to
be an array containing M matrices, each of which has dimension N x P.
That is, A is an M x N x P array. Multitransp then returns an array
containing the M matrix transposes of the matrices in A, each of which
will be P x N.
"""
# First check if we have been given just one matrix
if A.ndim == 2:
return A.T
return np.transpose(A, (0, 2, 1))
def multisym(A):
# Inspired by MATLAB multisym function by Nicholas Boumal.
return 0.5 * (A + multitransp(A))
def multiskew(A):
# Inspired by MATLAB multiskew function by Nicholas Boumal.
return 0.5 * (A - multitransp(A))
def multieye(k, n):
# Creates a k x n x n array containing k (n x n) identity matrices.
return np.tile(np.eye(n), (k, 1, 1))
def multilog(A, pos_def=False):
if not pos_def:
raise NotImplementedError
# Computes the logm of each matrix in an array containing k positive
# definite matrices. This is much faster than scipy.linalg.logm even
# for a single matrix. Could potentially be improved further.
l, v = np.linalg.eigh(A)
l = np.expand_dims(np.log(l), axis=-1)
return multiprod(v, l * multitransp(v))
def multiexp(A, sym=False):
if not sym:
raise NotImplementedError
# Compute the expm of each matrix in an array of k symmetric matrices.
# Sometimes faster than scipy.linalg.expm even for a single matrix.
l, v = np.linalg.eigh(A)
l = np.expand_dims(np.exp(l), axis=-1)
return multiprod(v, l * multitransp(v))
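# Sketch: multilog and multiexp are mutually inverse on a stack of symmetric
# positive definite matrices (assuming the pos_def/sym flags are set):
#
#     X = np.random.randn(5, 4, 4)
#     S = multiprod(X, multitransp(X)) + 4 * multieye(5, 4)  # SPD stack
#     np.allclose(multiexp(multilog(S, pos_def=True), sym=True), S)  # True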
| tingelst/pymanopt | pymanopt/tools/multi.py | Python | bsd-3-clause | 2,443 |
import os
import sys
import json
import time
import numpy
import dendropy
from collections import defaultdict
import pdb
def parse_site_rates(rate_file, correction = 1, test = False, count = 0):
"""Parse the site rate file returned from hyphy to a vector of rates"""
# for whatever reason, when run in a virtualenv (and perhaps in other
# cases), the file does not seem to be written quite before we try
# to read it. so, pause and try to re-read up to three times.
try:
data = json.load(open(rate_file, 'r'))
except IOError as e:
if count <= 3:
count += 1
time.sleep(0.1)
return parse_site_rates(rate_file, correction, test, count)
else:
raise IOError("Cannot open {0}: {1}".format(rate_file, e))
rates = numpy.array([line["rate"] for line in data["sites"]["rates"]])
corrected = rates/correction
if not test:
data["sites"]["corrected_rates"] = [{"site":k + 1,"rate":v} \
for k,v in enumerate(corrected)]
json.dump(data, open(rate_file,'w'), indent = 4)
return corrected
def correct_branch_lengths(tree_file, format, output_dir):
"""Scale branch lengths to values shorter than 100"""
tree = dendropy.Tree.get_from_path(tree_file, format)
depth = tree.seed_node.distance_from_tip()
mean_branch_length = tree.length()/(2 * len(tree.leaf_nodes()) - 3)
string_len = len(str(int(mean_branch_length + 0.5)))
if string_len > 1:
correction_factor = 10 ** string_len
else:
correction_factor = 1
for edge in tree.preorder_edge_iter():
if edge.length:
edge.length /= correction_factor
pth = os.path.join(output_dir, '{0}.corrected.newick'.format(
os.path.basename(tree_file)
))
tree.write_to_path(pth, 'newick')
return pth, correction_factor
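# Worked example of the scaling rule above (illustrative numbers): for a mean
# branch length of 57.3, str(int(57.3 + 0.5)) == '57', so string_len == 2 and
# correction_factor == 10 ** 2 == 100; every edge length is divided by 100.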
def get_net_pi_for_periods(pi, times):
"""Sum across the PI values for the requested times"""
sums = numpy.nansum(pi, axis=1)[times]
return dict(zip(times, sums))
def get_informative_sites(alignment, threshold=4):
"""Returns a list, where True indicates a site which was over the threshold
for informativeness.
"""
taxa = dendropy.DnaCharacterMatrix.get_from_path(alignment, 'nexus')
results = defaultdict(int)
for cells in taxa.vectors():
assert len(cells) == taxa.vector_size # should all have equal lengths
for idx, cell in enumerate(cells):
results[idx] += 1 if str(cell).upper() in "ATGC" else 0
return numpy.array([1 if results[x] >= threshold else numpy.nan for x in sorted(results)])
def cull_uninformative_rates(rates, inform):
"""Zeroes out rates which are uninformative"""
return rates * inform
| faircloth-lab/rhino | rhino/core.py | Python | bsd-3-clause | 2,759 |
# python filter_transcript_counts.py < transcript_counts.txt > active_transcripts.txt
import sys
print "Gene\tTranscript\tExpression"
for l in sys.stdin:
t = l.strip().split('\t')
if float(t[2]) > 1.1:
print '\t'.join(t[0:3])
| ahonkela/pol2rna | python/filter_transcript_counts.py | Python | bsd-3-clause | 244 |
from django.shortcuts import render
from django.views.generic.base import TemplateView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from blog.models import Post
from taggit.models import Tag
class BlogHomeView(ListView):
template_name = 'blog/home.html'
context_object_name = 'posts'
paginate_by = 10
def get_queryset(self):
posts = Post.objects.order_by('-pub_date')
if self.request.user.is_superuser:
return posts
else:
return posts.filter(is_published=True)
class BlogPostView(DetailView):
context_object_name = 'post'
template_name = 'blog/post.html'
def get_queryset(self):
if self.request.user.is_superuser:
return Post.objects.all()
return Post.objects.filter(is_published=True)
class BlogTagView(TemplateView):
template_name = 'blog/tag.html'
def get_context_data(self, **kwargs):
context = super(BlogTagView, self).get_context_data(**kwargs)
tagslug = self.kwargs['slug']
tag = Tag.objects.get(slug=tagslug)
context['tag'] = tag.name
context['taggedposts'] = (Post.objects
.filter(is_published=True)
.filter(tags__name=tag.name)
.distinct())
context['published_tags'] = Post.objects.filter(is_published=True)
return context
class BlogRssFeed(Feed):
title = "Brandon Waskiewicz's blog"
link = '/blog/'
description = 'Inside my head'
def items(self):
return Post.objects.filter(is_published=True).order_by('-pub_date')
def item_title(self, item):
return item.name
def item_description(self, item):
return item.get_preview()
class BlogAtomFeed(BlogRssFeed):
feed_type = Atom1Feed
subtitle = BlogRssFeed.title
| brandonw/personal-site | personal-site/blog/views.py | Python | bsd-3-clause | 1,941 |
"""Polynomial factorization routines in characteristic zero. """
from __future__ import print_function, division
from sympy.polys.galoistools import (
gf_from_int_poly, gf_to_int_poly,
gf_lshift, gf_add_mul, gf_mul,
gf_div, gf_rem,
gf_gcdex,
gf_sqf_p,
gf_factor_sqf, gf_factor)
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dmp_from_dict,
dmp_zero_p,
dmp_one,
dmp_nest, dmp_raise,
dup_strip,
dmp_ground,
dup_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd)
from sympy.polys.densearith import (
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dmp_pow,
dup_div, dmp_div,
dup_quo, dmp_quo,
dmp_expand,
dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_lshift,
dup_max_norm, dmp_max_norm,
dup_l1_norm,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_trunc, dmp_ground_trunc,
dup_content,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dmp_eval_tail,
dmp_eval_in, dmp_diff_eval_in,
dmp_compose,
dup_shift, dup_mirror)
from sympy.polys.euclidtools import (
dmp_primitive,
dup_inner_gcd, dmp_inner_gcd)
from sympy.polys.sqfreetools import (
dup_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part)
from sympy.polys.polyutils import _sort_factors
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
ExtraneousFactors, DomainError, CoercionFailed, EvaluationFailed)
from sympy.ntheory import nextprime, isprime, factorint
from sympy.utilities import subsets
from math import ceil as _ceil, log as _log
from sympy.core.compatibility import range
def dup_trial_division(f, factors, K):
"""
Determine multiplicities of factors for a univariate polynomial
using trial division.
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dup_div(f, factor, K)
if not r:
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
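# A minimal sketch (low-level dense representation over ZZ): recover the
# multiplicities in f = (x - 1)**2 * (x + 2) = x**3 - 3*x + 2 given its
# irreducible factors.
#
#     from sympy.polys.domains import ZZ
#     f = [ZZ(1), ZZ(0), ZZ(-3), ZZ(2)]
#     dup_trial_division(f, [[ZZ(1), ZZ(-1)], [ZZ(1), ZZ(2)]], ZZ)
#     # x - 1 is paired with exponent 2 and x + 2 with exponent 1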
def dmp_trial_division(f, factors, u, K):
"""
Determine multiplicities of factors for a multivariate polynomial
using trial division.
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dmp_div(f, factor, u, K)
if dmp_zero_p(r, u):
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dup_zz_mignotte_bound(f, K):
"""Mignotte bound for univariate polynomials in `K[x]`. """
a = dup_max_norm(f, K)
b = abs(dup_LC(f, K))
n = dup_degree(f)
return K.sqrt(K(n + 1))*2**n*a*b
def dmp_zz_mignotte_bound(f, u, K):
"""Mignotte bound for multivariate polynomials in `K[X]`. """
a = dmp_max_norm(f, u, K)
b = abs(dmp_ground_LC(f, u, K))
n = sum(dmp_degree_list(f, u))
return K.sqrt(K(n + 1))*2**n*a*b
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f = g*h (mod m)
s*g + t*h = 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) = 1
deg(f) = deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f = G*H (mod m**2)
S*G + T*H = 1 (mod m**2)
References
==========
.. [1] [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
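# A minimal sketch of one lifting step (dense lists over ZZ): start from
# f = x**2 - 1 = (x - 1)*(x + 1) (mod 5) with Bezout cofactors s = 2, t = 3,
# since 2*(x - 1) + 3*(x + 1) = 5*x + 1 = 1 (mod 5), and lift to modulus 25.
#
#     from sympy.polys.domains import ZZ
#     G, H, S, T = dup_zz_hensel_step(
#         5, [ZZ(1), ZZ(0), ZZ(-1)], [ZZ(1), ZZ(-1)], [ZZ(1), ZZ(1)],
#         [ZZ(2)], [ZZ(3)], ZZ)
#     # now f == G*H (mod 25) and S*G + T*H == 1 (mod 25)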
def dup_zz_hensel_lift(p, f, f_list, l, K):
"""
Multifactor Hensel lifting in `Z[x]`.
Given a prime `p`, polynomial `f` over `Z[x]` such that `lc(f)`
is a unit modulo `p`, monic pair-wise coprime polynomials `f_i`
over `Z[x]` satisfying::
f = lc(f) f_1 ... f_r (mod p)
and a positive integer `l`, returns a list of monic polynomials
`F_1`, `F_2`, ..., `F_r` satisfying::
f = lc(f) F_1 ... F_r (mod p**l)
F_i = f_i (mod p), i = 1..r
References
==========
.. [1] [Gathen99]_
"""
r = len(f_list)
lc = dup_LC(f, K)
if r == 1:
F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K)
return [ dup_trunc(F, p**l, K) ]
m = p
k = r // 2
d = int(_ceil(_log(l, 2)))
g = gf_from_int_poly([lc], p)
for f_i in f_list[:k]:
g = gf_mul(g, gf_from_int_poly(f_i, p), p, K)
h = gf_from_int_poly(f_list[k], p)
for f_i in f_list[k + 1:]:
h = gf_mul(h, gf_from_int_poly(f_i, p), p, K)
s, t, _ = gf_gcdex(g, h, p, K)
g = gf_to_int_poly(g, p)
h = gf_to_int_poly(h, p)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
for _ in range(1, d + 1):
(g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2
return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \
+ dup_zz_hensel_lift(p, h, f_list[k:], l, K)
def _test_pl(fc, q, pl):
if q > pl // 2:
q = q - pl
if not q:
return True
return fc % q == 0
def dup_zz_zassenhaus(f, K):
"""Factor primitive square-free polynomials in `Z[x]`. """
n = dup_degree(f)
if n == 1:
return [f]
fc = f[-1]
A = dup_max_norm(f, K)
b = dup_LC(f, K)
B = int(abs(K.sqrt(K(n + 1))*2**n*A*b))
C = int((n + 1)**(2*n)*A**(2*n - 1))
gamma = int(_ceil(2*_log(C, 2)))
bound = int(2*gamma*_log(gamma))
a = []
# choose a prime number `p` such that `f` is square-free in Z_p;
# if there are many factors in Z_p, choose among a few different `p`
# the one with the fewest factors
for px in range(3, bound + 1):
if not isprime(px) or b % px == 0:
continue
px = K.convert(px)
F = gf_from_int_poly(f, px)
if not gf_sqf_p(F, px, K):
continue
fsqfx = gf_factor_sqf(F, px, K)[1]
a.append((px, fsqfx))
if len(fsqfx) < 15 or len(a) > 4:
break
p, fsqf = min(a, key=lambda x: len(x[1]))
l = int(_ceil(_log(2*B + 1, p)))
modular = [gf_to_int_poly(ff, p) for ff in fsqf]
g = dup_zz_hensel_lift(p, f, modular, l, K)
sorted_T = range(len(g))
T = set(sorted_T)
factors, s = [], 1
pl = p**l
while 2*s <= len(T):
for S in subsets(sorted_T, s):
# lift the constant coefficient of the product `G` of the factors
# in the subset `S`; if it does not divide `fc`, `G` does
# not divide the input polynomial
if b == 1:
q = 1
for i in S:
q = q*g[i][-1]
q = q % pl
if not _test_pl(fc, q, pl):
continue
else:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
G = dup_primitive(G, K)[1]
q = G[-1]
if q and fc % q != 0:
continue
H = [b]
S = set(S)
T_S = T - S
if b == 1:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
for i in T_S:
H = dup_mul(H, g[i], K)
H = dup_trunc(H, pl, K)
G_norm = dup_l1_norm(G, K)
H_norm = dup_l1_norm(H, K)
if G_norm*H_norm <= B:
T = T_S
sorted_T = [i for i in sorted_T if i not in S]
G = dup_primitive(G, K)[1]
f = dup_primitive(H, K)[1]
factors.append(G)
b = dup_LC(f, K)
break
else:
s += 1
return factors + [f]
def dup_zz_irreducible_p(f, K):
"""Test irreducibility using Eisenstein's criterion. """
lc = dup_LC(f, K)
tc = dup_TC(f, K)
e_fc = dup_content(f[1:], K)
if e_fc:
e_ff = factorint(int(e_fc))
for p in e_ff.keys():
if (lc % p) and (tc % p**2):
return True
def dup_cyclotomic_p(f, K, irreducible=False):
"""
Efficiently test if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(f)
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(g)
True
"""
if K.is_QQ:
try:
K0, K = K, K.get_ring()
f = dup_convert(f, K0, K)
except CoercionFailed:
return False
elif not K.is_ZZ:
return False
lc = dup_LC(f, K)
tc = dup_TC(f, K)
if lc != 1 or (tc != -1 and tc != 1):
return False
if not irreducible:
coeff, factors = dup_factor_list(f, K)
if coeff != K.one or factors != [(f, 1)]:
return False
n = dup_degree(f)
g, h = [], []
for i in range(n, -1, -2):
g.insert(0, f[i])
for i in range(n - 1, -1, -2):
h.insert(0, f[i])
g = dup_sqr(dup_strip(g), K)
h = dup_sqr(dup_strip(h), K)
F = dup_sub(g, dup_lshift(h, 1, K), K)
if K.is_negative(dup_LC(F, K)):
F = dup_neg(F, K)
if F == f:
return True
g = dup_mirror(f, K)
if K.is_negative(dup_LC(g, K)):
g = dup_neg(g, K)
if F == g and dup_cyclotomic_p(g, K):
return True
G = dup_sqf_part(F, K)
if dup_sqr(G, K) == F and dup_cyclotomic_p(G, K):
return True
return False
def dup_zz_cyclotomic_poly(n, K):
"""Efficiently generate n-th cyclotomic polynomial. """
h = [K.one, -K.one]
for p, k in factorint(n).items():
h = dup_quo(dup_inflate(h, p, K), h, K)
h = dup_inflate(h, p**(k - 1), K)
return h
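# Sketch: the 12th cyclotomic polynomial is x**4 - x**2 + 1.
#
#     from sympy.polys.domains import ZZ
#     dup_zz_cyclotomic_poly(12, ZZ)  # -> [1, 0, -1, 0, 1]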
def _dup_cyclotomic_decompose(n, K):
H = [[K.one, -K.one]]
for p, k in factorint(n).items():
Q = [ dup_quo(dup_inflate(h, p, K), h, K) for h in H ]
H.extend(Q)
for i in range(1, k):
Q = [ dup_inflate(q, p, K) for q in Q ]
H.extend(Q)
return H
def dup_zz_cyclotomic_factor(f, K):
"""
Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` returns a list of factors
of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for
`n >= 1`. Otherwise returns None.
Factorization is performed using cyclotomic decomposition of `f`,
which makes this method much faster than any other direct factorization
approach (e.g. Zassenhaus's).
References
==========
.. [1] [Weisstein09]_
"""
lc_f, tc_f = dup_LC(f, K), dup_TC(f, K)
if dup_degree(f) <= 0:
return None
if lc_f != 1 or tc_f not in [-1, 1]:
return None
if any(bool(cf) for cf in f[1:-1]):
return None
n = dup_degree(f)
F = _dup_cyclotomic_decompose(n, K)
if not K.is_one(tc_f):
return F
else:
H = []
for h in _dup_cyclotomic_decompose(2*n, K):
if h not in F:
H.append(h)
return H
def dup_zz_factor_sqf(f, K):
"""Factor square-free (non-primitive) polynomials in `Z[x]`. """
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [g]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [g]
factors = None
if query('USE_CYCLOTOMIC_FACTOR'):
factors = dup_zz_cyclotomic_factor(g, K)
if factors is None:
factors = dup_zz_zassenhaus(g, K)
return cont, _sort_factors(factors, multiple=False)
def dup_zz_factor(f, K):
"""
Factor (non square-free) polynomials in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Zassenhaus algorithm. Trial division is used to recover the
multiplicities of factors.
The result is returned as a tuple consisting of::
(content(f), [(f_1, k_1), ..., (f_n, k_n)])
Examples
========
Consider the polynomial `f = 2*x**4 - 2`::
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_zz_factor(2*x**4 - 2)
(2, [(x - 1, 1), (x + 1, 1), (x**2 + 1, 1)])
As a result we obtain the following factorization::
f = 2 (x - 1) (x + 1) (x**2 + 1)
Note that this is a complete factorization over integers,
however over Gaussian integers we can factor the last term.
By default, polynomials `x**n - 1` and `x**n + 1` are factored
using cyclotomic decomposition to speedup computations. To
disable this behaviour set cyclotomic=False.
References
==========
.. [1] [Gathen99]_
"""
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [(g, 1)]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [(g, 1)]
g = dup_sqf_part(g, K)
H = None
if query('USE_CYCLOTOMIC_FACTOR'):
H = dup_zz_cyclotomic_factor(g, K)
if H is None:
H = dup_zz_zassenhaus(g, K)
factors = dup_trial_division(f, H, K)
return cont, factors
def dmp_zz_wang_non_divisors(E, cs, ct, K):
"""Wang/EEZ: Compute a set of valid divisors. """
result = [ cs*ct ]
for q in E:
q = abs(q)
for r in reversed(result):
while r != 1:
r = K.gcd(r, q)
q = q // r
if K.is_one(q):
return None
result.append(q)
return result[1:]
def dmp_zz_wang_test_points(f, T, ct, A, u, K):
"""Wang/EEZ: Test evaluation points for suitability. """
if not dmp_eval_tail(dmp_LC(f, K), A, u - 1, K):
raise EvaluationFailed('no luck')
g = dmp_eval_tail(f, A, u, K)
if not dup_sqf_p(g, K):
raise EvaluationFailed('no luck')
c, h = dup_primitive(g, K)
if K.is_negative(dup_LC(h, K)):
c, h = -c, dup_neg(h, K)
v = u - 1
E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ]
D = dmp_zz_wang_non_divisors(E, c, ct, K)
if D is not None:
return c, h, E
else:
raise EvaluationFailed('no luck')
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K):
"""Wang/EEZ: Compute correct leading coefficients. """
C, J, v = [], [0]*len(E), u - 1
for h in H:
c = dmp_one(v, K)
d = dup_LC(h, K)*cs
for i in reversed(range(len(E))):
k, e, (t, _) = 0, E[i], T[i]
while not (d % e):
d, k = d//e, k + 1
if k != 0:
c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1
C.append(c)
if any(not j for j in J):
raise ExtraneousFactors # pragma: no cover
CC, HH = [], []
for c, h in zip(C, H):
d = dmp_eval_tail(c, A, v, K)
lc = dup_LC(h, K)
if K.is_one(cs):
cc = lc//d
else:
g = K.gcd(lc, d)
d, cc = d//g, lc//g
h, cs = dup_mul_ground(h, d, K), cs//d
c = dmp_mul_ground(c, cc, v, K)
CC.append(c)
HH.append(h)
if K.is_one(cs):
return f, HH, CC
CCC, HHH = [], []
for c, h in zip(CC, HH):
CCC.append(dmp_mul_ground(c, cs, v, K))
HHH.append(dmp_mul_ground(h, cs, 0, K))
f = dmp_mul_ground(f, cs**(len(H) - 1), u, K)
return f, HHH, CCC
def dup_zz_diophantine(F, m, p, K):
"""Wang/EEZ: Solve univariate Diophantine equations. """
if len(F) == 2:
a, b = F
f = gf_from_int_poly(a, p)
g = gf_from_int_poly(b, p)
s, t, G = gf_gcdex(g, f, p, K)
s = gf_lshift(s, m, K)
t = gf_lshift(t, m, K)
q, s = gf_div(s, f, p, K)
t = gf_add_mul(t, q, g, p, K)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
result = [s, t]
else:
G = [F[-1]]
for f in reversed(F[1:-1]):
G.insert(0, dup_mul(f, G[0], K))
S, T = [], [[1]]
for f, g in zip(F, G):
t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K)
T.append(t)
S.append(s)
result, S = [], S + [T[-1]]
for s, f in zip(S, F):
s = gf_from_int_poly(s, p)
f = gf_from_int_poly(f, p)
r = gf_rem(gf_lshift(s, m, K), f, p, K)
s = gf_to_int_poly(r, p)
result.append(s)
return result
def dmp_zz_diophantine(F, c, A, d, p, u, K):
"""Wang/EEZ: Solve multivariate Diophantine equations. """
if not A:
S = [ [] for _ in F ]
n = dup_degree(c)
for i, coeff in enumerate(c):
if not coeff:
continue
T = dup_zz_diophantine(F, n - i, p, K)
for j, (s, t) in enumerate(zip(S, T)):
t = dup_mul_ground(t, coeff, K)
S[j] = dup_trunc(dup_add(s, t, K), p, K)
else:
n = len(A)
e = dmp_expand(F, u, K)
a, A = A[-1], A[:-1]
B, G = [], []
for f in F:
B.append(dmp_quo(e, f, u, K))
G.append(dmp_eval_in(f, a, n, u, K))
C = dmp_eval_in(c, a, n, u, K)
v = u - 1
S = dmp_zz_diophantine(G, C, A, d, p, v, K)
S = [ dmp_raise(s, 1, v, K) for s in S ]
for s, b in zip(S, B):
c = dmp_sub_mul(c, s, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
m = dmp_nest([K.one, -a], n, K)
M = dmp_one(n, K)
for k in K.map(range(0, d)):
if dmp_zero_p(c, u):
break
M = dmp_mul(M, m, u, K)
C = dmp_diff_eval_in(c, k + 1, a, n, u, K)
if not dmp_zero_p(C, v):
C = dmp_quo_ground(C, K.factorial(k + 1), v, K)
T = dmp_zz_diophantine(G, C, A, d, p, v, K)
for i, t in enumerate(T):
T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K)
for i, (s, t) in enumerate(zip(S, T)):
S[i] = dmp_add(s, t, u, K)
for t, b in zip(T, B):
c = dmp_sub_mul(c, t, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
S = [ dmp_ground_trunc(s, p, u, K) for s in S ]
return S
def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K):
"""Wang/EEZ: Parallel Hensel lifting algorithm. """
S, n, v = [f], len(A), u - 1
H = list(H)
for i, a in enumerate(reversed(A[1:])):
s = dmp_eval_in(S[0], a, n - i, u - i, K)
S.insert(0, dmp_ground_trunc(s, p, v - i, K))
d = max(dmp_degree_list(f, u)[1:])
for j, s, a in zip(range(2, n + 2), S, A):
G, w = list(H), j - 1
I, J = A[:j - 2], A[j - 1:]
for i, (h, lc) in enumerate(zip(H, LC)):
lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w - 1, K)
H[i] = [lc] + dmp_raise(h[1:], 1, w - 1, K)
m = dmp_nest([K.one, -a], w, K)
M = dmp_one(w, K)
c = dmp_sub(s, dmp_expand(H, w, K), w, K)
dj = dmp_degree_in(s, w, w)
for k in K.map(range(0, dj)):
if dmp_zero_p(c, w):
break
M = dmp_mul(M, m, w, K)
C = dmp_diff_eval_in(c, k + 1, a, w, w, K)
if not dmp_zero_p(C, w - 1):
C = dmp_quo_ground(C, K.factorial(k + 1), w - 1, K)
T = dmp_zz_diophantine(G, C, I, d, p, w - 1, K)
for i, (h, t) in enumerate(zip(H, T)):
h = dmp_add_mul(h, dmp_raise(t, 1, w - 1, K), M, w, K)
H[i] = dmp_ground_trunc(h, p, w, K)
h = dmp_sub(s, dmp_expand(H, w, K), w, K)
c = dmp_ground_trunc(h, p, w, K)
if dmp_expand(H, u, K) != f:
raise ExtraneousFactors # pragma: no cover
else:
return H
def dmp_zz_wang(f, u, K, mod=None, seed=None):
"""
Factor primitive square-free polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which is
primitive and square-free in `x_1`, computes factorization of `f` into
irreducibles over integers.
The procedure is based on Wang's Enhanced Extended Zassenhaus
algorithm. The algorithm works by viewing `f` as a univariate polynomial
in `Z[x_2,...,x_n][x_1]`, for which an evaluation mapping is computed::
x_2 -> a_2, ..., x_n -> a_n
where `a_i`, for `i = 2, ..., n`, are carefully chosen integers. The
mapping is used to transform `f` into a univariate polynomial in `Z[x_1]`,
which can be factored efficiently using Zassenhaus algorithm. The last
step is to lift univariate factors to obtain true multivariate
factors. For this purpose a parallel Hensel lifting procedure is used.
The parameter ``seed`` is passed to _randint and can be used to seed randint
(when an integer) or (for testing purposes) can be a sequence of numbers.
References
==========
.. [1] [Wang78]_
.. [2] [Geddes92]_
"""
from sympy.utilities.randtest import _randint
randint = _randint(seed)
ct, T = dmp_zz_factor(dmp_LC(f, K), u - 1, K)
b = dmp_zz_mignotte_bound(f, u, K)
p = K(nextprime(b))
if mod is None:
if u == 1:
mod = 2
else:
mod = 1
history, configs, A, r = set([]), [], [K.zero]*u, None
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
_, H = dup_zz_factor_sqf(s, K)
r = len(H)
if r == 1:
return [f]
configs = [(s, cs, E, H, A)]
except EvaluationFailed:
pass
eez_num_configs = query('EEZ_NUMBER_OF_CONFIGS')
eez_num_tries = query('EEZ_NUMBER_OF_TRIES')
eez_mod_step = query('EEZ_MODULUS_STEP')
while len(configs) < eez_num_configs:
for _ in range(eez_num_tries):
A = [ K(randint(-mod, mod)) for _ in range(u) ]
if tuple(A) not in history:
history.add(tuple(A))
else:
continue
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
except EvaluationFailed:
continue
_, H = dup_zz_factor_sqf(s, K)
rr = len(H)
if r is not None:
if rr != r: # pragma: no cover
if rr < r:
configs, r = [], rr
else:
continue
else:
r = rr
if r == 1:
return [f]
configs.append((s, cs, E, H, A))
if len(configs) == eez_num_configs:
break
else:
mod += eez_mod_step
s_norm, s_arg, i = None, 0, 0
for s, _, _, _, _ in configs:
_s_norm = dup_max_norm(s, K)
if s_norm is not None:
if _s_norm < s_norm:
s_norm = _s_norm
s_arg = i
else:
s_norm = _s_norm
i += 1
_, cs, E, H, A = configs[s_arg]
orig_f = f
try:
f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K)
factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K)
except ExtraneousFactors: # pragma: no cover
if query('EEZ_RESTART_IF_NEEDED'):
return dmp_zz_wang(orig_f, u, K, mod + 1)
else:
raise ExtraneousFactors(
"we need to restart algorithm with better parameters")
result = []
for f in factors:
_, f = dmp_ground_primitive(f, u, K)
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
result.append(f)
return result
def dmp_zz_factor(f, u, K):
"""
Factor (non square-free) polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division
is used to recover the multiplicities of factors.
The result is returned as a tuple consisting of::
(content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*(x**2 - y**2)`::
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_zz_factor(2*x**2 - 2*y**2)
(2, [(x - y, 1), (x + y, 1)])
As a result we obtain the following factorization::
f = 2 (x - y) (x + y)
References
==========
.. [1] [Gathen99]_
"""
if not u:
return dup_zz_factor(f, K)
if dmp_zero_p(f, u):
return K.zero, []
cont, g = dmp_ground_primitive(f, u, K)
if dmp_ground_LC(g, u, K) < 0:
cont, g = -cont, dmp_neg(g, u, K)
if all(d <= 0 for d in dmp_degree_list(g, u)):
return cont, []
G, g = dmp_primitive(g, u, K)
factors = []
if dmp_degree(g, u) > 0:
g = dmp_sqf_part(g, u, K)
H = dmp_zz_wang(g, u, K)
factors = dmp_trial_division(f, H, u, K)
for g, k in dmp_zz_factor(G, u - 1, K)[1]:
factors.insert(0, ([g], k))
return cont, _sort_factors(factors)
def dup_ext_factor(f, K):
"""Factor univariate polynomials over algebraic number fields. """
n, lc = dup_degree(f), dup_LC(f, K)
f = dup_monic(f, K)
if n <= 0:
return lc, []
if n == 1:
return lc, [(f, 1)]
f, F = dup_sqf_part(f, K), f
s, g, r = dup_sqf_norm(f, K)
factors = dup_factor_list_include(r, K.dom)
if len(factors) == 1:
return lc, [(f, n//dup_degree(f))]
H = s*K.unit
for i, (factor, _) in enumerate(factors):
h = dup_convert(factor, K.dom, K)
h, _, g = dup_inner_gcd(h, g, K)
h = dup_shift(h, H, K)
factors[i] = h
factors = dup_trial_division(F, factors, K)
return lc, factors
def dmp_ext_factor(f, u, K):
"""Factor multivariate polynomials over algebraic number fields. """
if not u:
return dup_ext_factor(f, K)
lc = dmp_ground_LC(f, u, K)
f = dmp_ground_monic(f, u, K)
if all(d <= 0 for d in dmp_degree_list(f, u)):
return lc, []
f, F = dmp_sqf_part(f, u, K), f
s, g, r = dmp_sqf_norm(f, u, K)
factors = dmp_factor_list_include(r, u, K.dom)
if len(factors) == 1:
factors = [f]
else:
H = dmp_raise([K.one, s*K.unit], u, 0, K)
for i, (factor, _) in enumerate(factors):
h = dmp_convert(factor, u, K.dom, K)
h, _, g = dmp_inner_gcd(h, g, u, K)
h = dmp_compose(h, H, u, K)
factors[i] = h
return lc, dmp_trial_division(F, factors, u, K)
def dup_gf_factor(f, K):
"""Factor univariate polynomials over finite fields. """
f = dup_convert(f, K, K.dom)
coeff, factors = gf_factor(f, K.mod, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K.dom, K), k)
return K.convert(coeff, K.dom), factors
def dmp_gf_factor(f, u, K):
"""Factor multivariate polynomials over finite fields. """
raise NotImplementedError('multivariate polynomials over finite fields')
def dup_factor_list(f, K0):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
j, f = dup_terms_gcd(f, K0)
cont, f = dup_primitive(f, K0)
if K0.is_FiniteField:
coeff, factors = dup_gf_factor(f, K0)
elif K0.is_Algebraic:
coeff, factors = dup_ext_factor(f, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dup_convert(f, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dup_clear_denoms(f, K0, K)
f = dup_convert(f, K0, K)
else:
K = K0
if K.is_ZZ:
coeff, factors = dup_zz_factor(f, K)
elif K.is_Poly:
f, u = dmp_inject(f, 0, K)
coeff, factors = dmp_factor_list(f, u, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, u, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dup_max_norm(f, K0)
f = dup_quo_ground(f, max_norm, K0)
f = dup_convert(f, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
if j:
factors.insert(0, ([K0.one, K0.zero], j))
return coeff*cont, _sort_factors(factors)
def dup_factor_list_include(f, K):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
coeff, factors = dup_factor_list(f, K)
if not factors:
return [(dup_strip([coeff]), 1)]
else:
g = dup_mul_ground(factors[0][0], coeff, K)
return [(g, factors[0][1])] + factors[1:]
def dmp_factor_list(f, u, K0):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list(f, K0)
J, f = dmp_terms_gcd(f, u, K0)
cont, f = dmp_ground_primitive(f, u, K0)
if K0.is_FiniteField: # pragma: no cover
coeff, factors = dmp_gf_factor(f, u, K0)
elif K0.is_Algebraic:
coeff, factors = dmp_ext_factor(f, u, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dmp_convert(f, u, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dmp_clear_denoms(f, u, K0, K)
f = dmp_convert(f, u, K0, K)
else:
K = K0
if K.is_ZZ:
levels, f, v = dmp_exclude(f, u, K)
coeff, factors = dmp_zz_factor(f, v, K)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_include(f, levels, v, K), k)
elif K.is_Poly:
f, v = dmp_inject(f, u, K)
coeff, factors = dmp_factor_list(f, v, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, v, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_convert(f, u, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dmp_max_norm(f, u, K0)
f = dmp_quo_ground(f, max_norm, u, K0)
f = dmp_convert(f, u, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
for i, j in enumerate(reversed(J)):
if not j:
continue
term = {(0,)*(u - i) + (1,) + (0,)*i: K0.one}
factors.insert(0, (dmp_from_dict(term, u, K0), j))
return coeff*cont, _sort_factors(factors)
def dmp_factor_list_include(f, u, K):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list_include(f, K)
coeff, factors = dmp_factor_list(f, u, K)
if not factors:
return [(dmp_ground(coeff, u), 1)]
else:
g = dmp_mul_ground(factors[0][0], coeff, u, K)
return [(g, factors[0][1])] + factors[1:]
def dup_irreducible_p(f, K):
"""
Returns ``True`` if a univariate polynomial ``f`` has no factors
over its domain.
"""
return dmp_irreducible_p(f, 0, K)
def dmp_irreducible_p(f, u, K):
"""
Returns ``True`` if a multivariate polynomial ``f`` has no factors
over its domain.
"""
_, factors = dmp_factor_list(f, u, K)
if not factors:
return True
elif len(factors) > 1:
return False
else:
_, k = factors[0]
return k == 1
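# Hedged usage sketch (not part of the original module): dense univariate
# polynomials are plain lists of domain coefficients, highest degree first,
# so the routines above can be exercised directly. Assumes only sympy's
# public ZZ domain object.
if __name__ == '__main__':  # pragma: no cover
    from sympy.polys.domains import ZZ
    # x**2 - 1 factors as (x - 1)*(x + 1) over ZZ
    coeff, factors = dup_factor_list([ZZ(1), ZZ(0), ZZ(-1)], ZZ)
    assert coeff == ZZ(1) and len(factors) == 2
    assert all(k == 1 for _, k in factors)
    # x**2 + 1 has no nontrivial factors over ZZ, so it is irreducible
    assert dup_irreducible_p([ZZ(1), ZZ(0), ZZ(1)], ZZ)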
|
kaushik94/sympy
|
sympy/polys/factortools.py
|
Python
|
bsd-3-clause
| 34,338
|
# -*- coding: utf-8 -*-
from djangocms_text_ckeditor.models import Text
from django.contrib.admin.sites import site
from django.contrib.admin.utils import unquote
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Group, Permission
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.translation import override as force_language
from cms.api import (add_plugin, assign_user_to_page, create_page,
create_page_user, publish_page)
from cms.admin.forms import save_permissions
from cms.cms_menus import get_visible_nodes
from cms.management.commands.subcommands.moderator import log
from cms.models import Page, CMSPlugin, Title, ACCESS_PAGE
from cms.models.permissionmodels import (ACCESS_DESCENDANTS,
ACCESS_PAGE_AND_DESCENDANTS,
PagePermission,
GlobalPagePermission)
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import (URL_CMS_PAGE_ADD, CMSTestCase)
from cms.test_utils.util.context_managers import disable_logger
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils import get_current_site
from cms.utils.page import get_page_from_path
from cms.utils.page_permissions import user_can_publish_page, user_can_view_page
def fake_tree_attrs(page):
page.depth = 1
page.path = '0001'
page.numchild = 0
@override_settings(CMS_PERMISSION=True)
class PermissionModeratorTests(CMSTestCase):
"""Permissions and moderator together
    Fixtures contain 3 users, 1 published page and some other stuff
Users:
1. `super`: superuser
2. `master`: user with permissions to all applications
3. `slave`: user assigned to page `slave-home`
Pages:
1. `home`:
- published page
- master can do anything on its subpages, but not on home!
2. `master`:
- published page
- created by super
- `master` can do anything on it and its descendants
- subpages:
3. `slave-home`:
- not published
        - assigned slave user who can add/change/delete/
          move/publish this page and its descendants
        - `master` user wants to moderate this page and all descendants
4. `pageA`:
- created by super
- master can add/change/delete on it and descendants
"""
#TODO: Split this test case into one that tests publish functionality, and
#TODO: one that tests permission inheritance. This is too complex.
def setUp(self):
# create super user
self.user_super = self._create_user("super", is_staff=True,
is_superuser=True)
self.user_staff = self._create_user("staff", is_staff=True,
add_default_permissions=True)
self.add_permission(self.user_staff, 'publish_page')
self.user_master = self._create_user("master", is_staff=True,
add_default_permissions=True)
self.add_permission(self.user_master, 'publish_page')
self.user_slave = self._create_user("slave", is_staff=True,
add_default_permissions=True)
self.user_normal = self._create_user("normal", is_staff=False)
self.user_normal.user_permissions.add(
Permission.objects.get(codename='publish_page'))
with self.login_user_context(self.user_super):
self.home_page = create_page("home", "nav_playground.html", "en",
created_by=self.user_super)
# master page & master user
self.master_page = create_page("master", "nav_playground.html", "en")
# create non global, non staff user
self.user_non_global = self._create_user("nonglobal")
# assign master user under home page
assign_user_to_page(self.home_page, self.user_master,
grant_on=ACCESS_PAGE_AND_DESCENDANTS, grant_all=True)
# and to master page
assign_user_to_page(self.master_page, self.user_master,
grant_on=ACCESS_PAGE_AND_DESCENDANTS, grant_all=True)
# slave page & slave user
self.slave_page = create_page("slave-home", "col_two.html", "en",
parent=self.master_page, created_by=self.user_super)
assign_user_to_page(self.slave_page, self.user_slave, grant_all=True)
# create page_b
page_b = create_page("pageB", "nav_playground.html", "en", created_by=self.user_super)
# Normal user
# it's allowed for the normal user to view the page
assign_user_to_page(page_b, self.user_normal, can_view=True)
# create page_a - sample page from master
page_a = create_page("pageA", "nav_playground.html", "en",
created_by=self.user_super)
assign_user_to_page(page_a, self.user_master,
can_add=True, can_change=True, can_delete=True, can_publish=True,
can_move_page=True)
# publish after creating all drafts
publish_page(self.home_page, self.user_super, 'en')
publish_page(self.master_page, self.user_super, 'en')
self.page_b = publish_page(page_b, self.user_super, 'en')
def _add_plugin(self, user, page):
"""
Add a plugin using the test client to check for permissions.
"""
with self.login_user_context(user):
placeholder = page.placeholders.all()[0]
post_data = {
'body': 'Test'
}
endpoint = self.get_add_plugin_uri(placeholder, 'TextPlugin')
response = self.client.post(endpoint, post_data)
self.assertEqual(response.status_code, 302)
return response.content.decode('utf8')
def test_super_can_add_page_to_root(self):
with self.login_user_context(self.user_super):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 200)
def test_master_cannot_add_page_to_root(self):
with self.login_user_context(self.user_master):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 403)
def test_slave_cannot_add_page_to_root(self):
with self.login_user_context(self.user_slave):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 403)
def test_slave_can_add_page_under_slave_home(self):
with self.login_user_context(self.user_slave):
# move to admin.py?
# url = URL_CMS_PAGE_ADD + "?target=%d&position=last-child" % slave_page.pk
# can he even access it over get?
# response = self.client.get(url)
# self.assertEqual(response.status_code, 200)
# add page
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page, created_by=self.user_slave)
# adds user_slave as page moderator for this page
# public model shouldn't be available yet, because of the moderation
# moderators and approval ok?
# must not have public object yet
self.assertFalse(page.publisher_public)
self.assertObjectExist(Title.objects, slug="page")
self.assertObjectDoesNotExist(Title.objects.public(), slug="page")
self.assertTrue(user_can_publish_page(self.user_slave, page))
# publish as slave, published as user_master before
publish_page(page, self.user_slave, 'en')
# user_slave is moderator for this page
# approve / publish as user_slave
# user master should be able to approve as well
@override_settings(
CMS_PLACEHOLDER_CONF={
'col_left': {
'default_plugins': [
{
'plugin_type': 'TextPlugin',
'values': {
'body': 'Lorem ipsum dolor sit amet, consectetur adipisicing elit. Culpa, repellendus, delectus, quo quasi ullam inventore quod quam aut voluptatum aliquam voluptatibus harum officiis officia nihil minus unde accusamus dolorem repudiandae.'
},
},
]
},
},
)
def test_default_plugins(self):
with self.login_user_context(self.user_slave):
self.assertEqual(CMSPlugin.objects.count(), 0)
response = self.client.get(self.slave_page.get_absolute_url(), {'edit': 1})
self.assertEqual(response.status_code, 200)
self.assertEqual(CMSPlugin.objects.count(), 1)
def test_page_added_by_slave_can_be_published_by_user_master(self):
# add page
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page, created_by=self.user_slave)
# same as test_slave_can_add_page_under_slave_home
# must not have public object yet
self.assertFalse(page.publisher_public)
self.assertTrue(user_can_publish_page(self.user_master, page))
        # user_master should have publish permissions for children as well
publish_page(self.slave_page, self.user_master, 'en')
page = publish_page(page, self.user_master, 'en')
self.assertTrue(page.publisher_public_id)
# user_master is moderator for top level page / but can't approve descendants?
# approve / publish as user_master
# user master should be able to approve descendants
def test_super_can_add_plugin(self):
self._add_plugin(self.user_super, page=self.slave_page)
def test_master_can_add_plugin(self):
self._add_plugin(self.user_master, page=self.slave_page)
def test_slave_can_add_plugin(self):
self._add_plugin(self.user_slave, page=self.slave_page)
def test_subtree_needs_approval(self):
        # create page under home_page
page = create_page("parent", "nav_playground.html", "en",
parent=self.home_page)
self.assertFalse(page.publisher_public)
# create subpage under page
subpage = create_page("subpage", "nav_playground.html", "en", parent=page, published=False)
# publish both of them in reverse order
subpage = publish_page(subpage, self.user_master, 'en')
# subpage should not be published, because parent is not published
self.assertNeverPublished(subpage)
        # publish page (parent of subpage)
page = publish_page(page, self.user_master, 'en')
self.assertPublished(page)
self.assertNeverPublished(subpage)
subpage = publish_page(subpage, self.user_master, 'en')
self.assertPublished(subpage)
def test_subtree_with_super(self):
# create page under root
page = create_page("page", "nav_playground.html", "en")
self.assertFalse(page.publisher_public)
# create subpage under page
subpage = create_page("subpage", "nav_playground.html", "en",
parent=page)
self.assertFalse(subpage.publisher_public)
# tree id must be the same
self.assertEqual(page.node.path[0:4], subpage.node.path[0:4])
# publish both of them
page = self.reload(page)
page = publish_page(page, self.user_super, 'en')
        # reload subpage, as there was a path change
subpage = self.reload(subpage)
self.assertEqual(page.node.path[0:4], subpage.node.path[0:4])
subpage = publish_page(subpage, self.user_super, 'en')
# tree id must stay the same
self.assertEqual(page.node.path[0:4], subpage.node.path[0:4])
def test_super_add_page_to_root(self):
"""Create page which is not under moderation in root, and check if
some properties are correct.
"""
# create page under root
page = create_page("page", "nav_playground.html", "en")
# public must not exist
self.assertFalse(page.publisher_public)
def test_plugins_get_published(self):
# create page under root
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, "TextPlugin", "en", body="test")
# public must not exist
self.assertEqual(CMSPlugin.objects.all().count(), 1)
publish_page(page, self.user_super, 'en')
self.assertEqual(CMSPlugin.objects.all().count(), 2)
def test_remove_plugin_page_under_moderation(self):
# login as slave and create page
page = create_page("page", "nav_playground.html", "en", parent=self.slave_page)
# add plugin
placeholder = page.placeholders.all()[0]
plugin = add_plugin(placeholder, "TextPlugin", "en", body="test")
# publish page
page = self.reload(page)
page = publish_page(page, self.user_slave, 'en')
# only the draft plugin should exist
self.assertEqual(CMSPlugin.objects.all().count(), 1)
# master approves and publishes the page
# first approve slave-home
slave_page = self.reload(self.slave_page)
publish_page(slave_page, self.user_master, 'en')
page = self.reload(page)
page = publish_page(page, self.user_master, 'en')
# draft and public plugins should now exist
self.assertEqual(CMSPlugin.objects.all().count(), 2)
# login as slave and delete the plugin - should require moderation
with self.login_user_context(self.user_slave):
plugin_data = {
'plugin_id': plugin.pk
}
endpoint = self.get_delete_plugin_uri(plugin)
response = self.client.post(endpoint, plugin_data)
self.assertEqual(response.status_code, 302)
# there should only be a public plugin - since the draft has been deleted
self.assertEqual(CMSPlugin.objects.all().count(), 1)
page = self.reload(page)
# login as super user and approve/publish the page
publish_page(page, self.user_super, 'en')
# there should now be 0 plugins
self.assertEqual(CMSPlugin.objects.all().count(), 0)
def test_superuser_can_view(self):
url = self.page_b.get_absolute_url(language='en')
with self.login_user_context(self.user_super):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_staff_can_view(self):
url = self.page_b.get_absolute_url(language='en')
all_view_perms = PagePermission.objects.filter(can_view=True)
        # verify that user_staff has no explicit view permission for this page
has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b:
if perm.user == self.user_staff:
has_perm = True
self.assertEqual(has_perm, False)
login_ok = self.client.login(username=getattr(self.user_staff, get_user_model().USERNAME_FIELD),
password=getattr(self.user_staff, get_user_model().USERNAME_FIELD))
self.assertTrue(login_ok)
# really logged in
self.assertTrue('_auth_user_id' in self.client.session)
login_user_id = self.client.session.get('_auth_user_id')
user = get_user_model().objects.get(pk=self.user_staff.pk)
self.assertEqual(str(login_user_id), str(user.id))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_user_normal_can_view(self):
url = self.page_b.get_absolute_url(language='en')
all_view_perms = PagePermission.objects.filter(can_view=True)
        # verify that user_normal has access to this page
normal_has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b:
if perm.user == self.user_normal:
normal_has_perm = True
self.assertTrue(normal_has_perm)
with self.login_user_context(self.user_normal):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
        # verify that user_non_global does not have access to this page
non_global_has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b:
if perm.user == self.user_non_global:
non_global_has_perm = True
self.assertFalse(non_global_has_perm)
with self.login_user_context(self.user_non_global):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# non logged in user
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_user_globalpermission(self):
# Global user
user_global = self._create_user("global")
with self.login_user_context(self.user_super):
user_global = create_page_user(user_global, user_global)
user_global.is_staff = False
user_global.save() # Prevent is_staff permission
global_page = create_page("global", "nav_playground.html", "en",
published=True)
# Removed call since global page user doesn't have publish permission
#global_page = publish_page(global_page, user_global)
# it's allowed for the normal user to view the page
assign_user_to_page(global_page, user_global,
global_permission=True, can_view=True)
url = global_page.get_absolute_url('en')
all_view_perms = PagePermission.objects.filter(can_view=True)
has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b and perm.user == user_global:
has_perm = True
self.assertEqual(has_perm, False)
global_page_perm_q = Q(user=user_global) & Q(can_view=True)
global_view_perms = GlobalPagePermission.objects.filter(global_page_perm_q).exists()
self.assertEqual(global_view_perms, True)
# user_global
with self.login_user_context(user_global):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
        # self.user_non_global
has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b and perm.user == self.user_non_global:
has_perm = True
self.assertEqual(has_perm, False)
global_page_perm_q = Q(user=self.user_non_global) & Q(can_view=True)
global_view_perms = GlobalPagePermission.objects.filter(global_page_perm_q).exists()
self.assertEqual(global_view_perms, False)
with self.login_user_context(self.user_non_global):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_anonymous_user_public_for_all(self):
url = self.page_b.get_absolute_url('en')
with self.settings(CMS_PUBLIC_FOR='all'):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_anonymous_user_public_for_none(self):
# default of when to show pages to anonymous user doesn't take
# global permissions into account
url = self.page_b.get_absolute_url('en')
with self.settings(CMS_PUBLIC_FOR=None):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
@override_settings(CMS_PERMISSION=True)
class PatricksMoveTest(CMSTestCase):
"""
    Fixtures contain 3 users, 1 published page and some other stuff
Users:
1. `super`: superuser
2. `master`: user with permissions to all applications
3. `slave`: user assigned to page `slave-home`
Pages:
1. `home`:
- published page
- master can do anything on its subpages, but not on home!
2. `master`:
- published page
        - created by super
- `master` can do anything on it and its descendants
- subpages:
3. `slave-home`:
- not published
        - assigned slave user who can add/change/delete/
          move/publish/moderate this page and its descendants
        - `master` user wants to moderate this page and all descendants
4. `pageA`:
- created by super
- master can add/change/delete on it and descendants
"""
def setUp(self):
# create super user
self.user_super = self._create_user("super", True, True)
with self.login_user_context(self.user_super):
self.home_page = create_page("home", "nav_playground.html", "en",
created_by=self.user_super)
# master page & master user
self.master_page = create_page("master", "nav_playground.html", "en")
# create master user
self.user_master = self._create_user("master", True)
self.add_permission(self.user_master, 'change_page')
self.add_permission(self.user_master, 'publish_page')
#self.user_master = create_page_user(self.user_super, master, grant_all=True)
# assign master user under home page
assign_user_to_page(self.home_page, self.user_master,
grant_on=ACCESS_DESCENDANTS, grant_all=True)
# and to master page
assign_user_to_page(self.master_page, self.user_master, grant_all=True)
# slave page & slave user
self.slave_page = create_page("slave-home", "nav_playground.html", "en",
parent=self.master_page, created_by=self.user_super)
slave = self._create_user("slave", True)
self.user_slave = create_page_user(self.user_super, slave, can_add_page=True,
can_change_page=True, can_delete_page=True)
assign_user_to_page(self.slave_page, self.user_slave, grant_all=True)
# create page_a - sample page from master
page_a = create_page("pageA", "nav_playground.html", "en",
created_by=self.user_super)
assign_user_to_page(page_a, self.user_master,
can_add=True, can_change=True, can_delete=True, can_publish=True,
can_move_page=True)
# publish after creating all drafts
publish_page(self.home_page, self.user_super, 'en')
publish_page(self.master_page, self.user_super, 'en')
with self.login_user_context(self.user_slave):
# 000200010001
self.pa = create_page("pa", "nav_playground.html", "en", parent=self.slave_page)
# 000200010002
self.pb = create_page("pb", "nav_playground.html", "en", parent=self.pa, position="right")
# 000200010003
self.pc = create_page("pc", "nav_playground.html", "en", parent=self.pb, position="right")
self.pd = create_page("pd", "nav_playground.html", "en", parent=self.pb)
self.pe = create_page("pe", "nav_playground.html", "en", parent=self.pd, position="right")
self.pf = create_page("pf", "nav_playground.html", "en", parent=self.pe)
self.pg = create_page("pg", "nav_playground.html", "en", parent=self.pf, position="right")
self.ph = create_page("ph", "nav_playground.html", "en", parent=self.pf, position="right")
self.assertFalse(self.pg.publisher_public)
# login as master for approval
self.slave_page = self.slave_page.reload()
publish_page(self.slave_page, self.user_master, 'en')
# publish and approve them all
publish_page(self.pa, self.user_master, 'en')
publish_page(self.pb, self.user_master, 'en')
publish_page(self.pc, self.user_master, 'en')
publish_page(self.pd, self.user_master, 'en')
publish_page(self.pe, self.user_master, 'en')
publish_page(self.pf, self.user_master, 'en')
publish_page(self.pg, self.user_master, 'en')
publish_page(self.ph, self.user_master, 'en')
self.reload_pages()
def reload_pages(self):
self.pa = self.pa.reload()
self.pb = self.pb.reload()
self.pc = self.pc.reload()
self.pd = self.pd.reload()
self.pe = self.pe.reload()
self.pf = self.pf.reload()
self.pg = self.pg.reload()
self.ph = self.ph.reload()
def test_patricks_move(self):
"""
Tests permmod when moving trees of pages.
1. build following tree (master node is approved and published)
slave-home
/ | \
A B C
/ \
D E
/ | \
F G H
2. perform move operations:
1. move G under C
2. move E under G
slave-home
/ | \
A B C
/ \
D G
\
E
/ \
F H
3. approve nodes in following order:
1. approve H
2. approve G
3. approve E
4. approve F
"""
self.assertEqual(self.pg.node.parent, self.pe.node)
# perform moves under slave...
self.move_page(self.pg, self.pc)
self.reload_pages()
# page is now under PC
self.assertEqual(self.pg.node.parent, self.pc.node)
self.assertEqual(self.pg.get_absolute_url(), self.pg.publisher_public.get_absolute_url())
self.move_page(self.pe, self.pg)
self.reload_pages()
self.assertEqual(self.pe.node.parent, self.pg.node)
self.ph = self.ph.reload()
        # check urls - they should stay the same after the move
self.assertEqual(
self.pg.publisher_public.get_absolute_url(),
self.pg.get_absolute_url()
)
self.assertEqual(
self.ph.publisher_public.get_absolute_url(),
self.ph.get_absolute_url()
)
# check if urls are correct after move
self.assertEqual(
self.pg.publisher_public.get_absolute_url(),
u'%smaster/slave-home/pc/pg/' % self.get_pages_root()
)
self.assertEqual(
self.ph.publisher_public.get_absolute_url(),
u'%smaster/slave-home/pc/pg/pe/ph/' % self.get_pages_root()
)
class ModeratorSwitchCommandTest(CMSTestCase):
def test_switch_moderator_on(self):
site = get_current_site()
with force_language("en"):
pages_root = unquote(reverse("pages-root"))
page1 = create_page('page', 'nav_playground.html', 'en', published=True)
with disable_logger(log):
call_command('cms', 'moderator', 'on')
with force_language("en"):
path = page1.get_absolute_url()[len(pages_root):].strip('/')
page2 = get_page_from_path(site, path)
self.assertEqual(page1.get_absolute_url(), page2.get_absolute_url())
def test_table_name_patching(self):
"""
This tests the plugin models patching when publishing from the command line
"""
self.get_superuser()
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
draft.publish('en')
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
# Manually undoing table name patching
Text._meta.db_table = 'djangocms_text_ckeditor_text'
plugin_pool.patched = False
with disable_logger(log):
call_command('cms', 'moderator', 'on')
# Sanity check the database (we should have one draft and one public)
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
def test_switch_moderator_off(self):
site = get_current_site()
with force_language("en"):
pages_root = unquote(reverse("pages-root"))
page1 = create_page('page', 'nav_playground.html', 'en', published=True)
path = page1.get_absolute_url()[len(pages_root):].strip('/')
page2 = get_page_from_path(site, path)
self.assertIsNotNone(page2)
self.assertEqual(page1.get_absolute_url(), page2.get_absolute_url())
class ViewPermissionBaseTests(CMSTestCase):
def setUp(self):
self.page = create_page('testpage', 'nav_playground.html', 'en')
self.site = get_current_site()
def get_request(self, user=None):
attrs = {
'user': user or AnonymousUser(),
'REQUEST': {},
'POST': {},
'GET': {},
'session': {},
}
return type('Request', (object,), attrs)
def assertViewAllowed(self, page, user=None):
if not user:
user = AnonymousUser()
self.assertTrue(user_can_view_page(user, page))
def assertViewNotAllowed(self, page, user=None):
if not user:
user = AnonymousUser()
self.assertFalse(user_can_view_page(user, page))
@override_settings(
CMS_PERMISSION=False,
CMS_PUBLIC_FOR='staff',
)
class BasicViewPermissionTests(ViewPermissionBaseTests):
"""
Test functionality with CMS_PERMISSION set to false, as this is the
normal use case
"""
@override_settings(CMS_PUBLIC_FOR="all")
def test_unauth_public(self):
request = self.get_request()
with self.assertNumQueries(0):
self.assertViewAllowed(self.page)
self.assertEqual(get_visible_nodes(request, [self.page], self.site),
[self.page])
def test_unauth_non_access(self):
request = self.get_request()
with self.assertNumQueries(0):
self.assertViewNotAllowed(self.page)
self.assertEqual(get_visible_nodes(request, [self.page], self.site),
[])
@override_settings(CMS_PUBLIC_FOR="all")
def test_staff_public_all(self):
user = self.get_staff_user_with_no_permissions()
request = self.get_request(user)
with self.assertNumQueries(0):
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, [self.page], self.site),
[self.page])
def test_staff_public_staff(self):
user = self.get_staff_user_with_no_permissions()
request = self.get_request(user)
with self.assertNumQueries(0):
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, [self.page], self.site),
[self.page])
def test_staff_basic_auth(self):
user = self.get_staff_user_with_no_permissions()
request = self.get_request(user)
with self.assertNumQueries(0):
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, [self.page], self.site),
[self.page])
@override_settings(CMS_PUBLIC_FOR="all")
def test_normal_basic_auth(self):
user = self.get_standard_user()
request = self.get_request(user)
with self.assertNumQueries(0):
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, [self.page], self.site), [self.page])
@override_settings(
CMS_PERMISSION=True,
CMS_PUBLIC_FOR='none'
)
class UnrestrictedViewPermissionTests(ViewPermissionBaseTests):
"""
Test functionality with CMS_PERMISSION set to True but no restrictions
apply to this specific page
"""
def test_unauth_non_access(self):
request = self.get_request()
with self.assertNumQueries(1):
"""
The query is:
PagePermission query for the affected page (is the page restricted?)
"""
self.assertViewNotAllowed(self.page)
self.assertEqual(get_visible_nodes(request, [self.page], self.site), [])
def test_global_access(self):
user = self.get_standard_user()
GlobalPagePermission.objects.create(can_view=True, user=user)
request = self.get_request(user)
with self.assertNumQueries(4):
"""The queries are:
PagePermission query for the affected page (is the page restricted?)
Generic django permission lookup
content type lookup by permission lookup
GlobalPagePermission query for the page site
"""
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, [self.page], self.site), [self.page])
def test_normal_denied(self):
user = self.get_standard_user()
request = self.get_request(user)
with self.assertNumQueries(4):
"""
The queries are:
PagePermission query for the affected page (is the page restricted?)
GlobalPagePermission query for the page site
User permissions query
Content type query
"""
self.assertViewNotAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, [self.page], self.site), [])
@override_settings(
CMS_PERMISSION=True,
CMS_PUBLIC_FOR='all'
)
class RestrictedViewPermissionTests(ViewPermissionBaseTests):
"""
Test functionality with CMS_PERMISSION set to True and view restrictions
apply to this specific page
"""
def setUp(self):
super(RestrictedViewPermissionTests, self).setUp()
self.group = Group.objects.create(name='testgroup')
self.pages = [self.page]
self.expected = [self.page]
PagePermission.objects.create(page=self.page, group=self.group, can_view=True, grant_on=ACCESS_PAGE)
def test_unauthed(self):
request = self.get_request()
with self.assertNumQueries(1):
"""The queries are:
PagePermission query for the affected page (is the page restricted?)
"""
self.assertViewNotAllowed(self.page)
self.assertEqual(get_visible_nodes(request, self.pages, self.site), [])
def test_page_permissions(self):
user = self.get_standard_user()
request = self.get_request(user)
PagePermission.objects.create(can_view=True, user=user, page=self.page, grant_on=ACCESS_PAGE)
with self.assertNumQueries(6):
"""
The queries are:
PagePermission query (is this page restricted)
content type lookup (x2)
GlobalpagePermission query for user
TreeNode lookup
PagePermission query for this user
"""
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, self.pages, self.site), self.expected)
def test_page_group_permissions(self):
user = self.get_standard_user()
user.groups.add(self.group)
request = self.get_request(user)
with self.assertNumQueries(6):
"""
The queries are:
PagePermission query (is this page restricted)
content type lookup (x2)
GlobalpagePermission query for user
TreeNode lookup
PagePermission query for user
"""
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, self.pages, self.site), self.expected)
def test_global_permission(self):
user = self.get_standard_user()
GlobalPagePermission.objects.create(can_view=True, user=user)
request = self.get_request(user)
with self.assertNumQueries(4):
"""
The queries are:
PagePermission query (is this page restricted)
Generic django permission lookup
content type lookup by permission lookup
GlobalpagePermission query for user
"""
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, self.pages, self.site), self.expected)
def test_basic_perm_denied(self):
user = self.get_staff_user_with_no_permissions()
request = self.get_request(user)
with self.assertNumQueries(6):
"""
The queries are:
PagePermission query (is this page restricted)
content type lookup x2
GlobalpagePermission query for user
TreeNode lookup
PagePermission query for this user
"""
self.assertViewNotAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, self.pages, self.site), [])
def test_basic_perm(self):
user = self.get_standard_user()
user.user_permissions.add(Permission.objects.get(codename='view_page'))
request = self.get_request(user)
with self.assertNumQueries(3):
"""
The queries are:
PagePermission query (is this page restricted)
Generic django permission lookup
content type lookup by permission lookup
"""
self.assertViewAllowed(self.page, user)
self.assertEqual(get_visible_nodes(request, self.pages, self.site), self.expected)
class PublicViewPermissionTests(RestrictedViewPermissionTests):
""" Run the same tests as before, but on the public page instead. """
def setUp(self):
super(PublicViewPermissionTests, self).setUp()
self.page.publish('en')
self.pages = [self.page.publisher_public]
self.expected = [self.page.publisher_public]
class GlobalPermissionTests(CMSTestCase):
def test_emulate_admin_index(self):
""" Call methods that emulate the adminsite instance's index.
This test was basically the reason for the new manager, in light of the
problem highlighted in ticket #1120, which asserts that giving a user
no site-specific rights when creating a GlobalPagePermission should
allow access to all sites.
"""
# create and then ignore this user.
superuser = self._create_user("super", is_staff=True, is_active=True,
is_superuser=True)
superuser.set_password("super")
superuser.save()
site_1 = Site.objects.get(pk=1)
site_2 = Site.objects.create(domain='example2.com', name='example2.com')
SITES = [site_1, site_2]
# create 2 staff users
USERS = [
self._create_user("staff", is_staff=True, is_active=True),
self._create_user("staff_2", is_staff=True, is_active=True),
]
for user in USERS:
user.set_password('staff')
# re-use the same methods the UserPage form does.
# Note that it internally calls .save(), as we've not done so.
save_permissions({
'can_add_page': True,
'can_change_page': True,
'can_delete_page': False
}, user)
GlobalPagePermission.objects.create(can_add=True, can_change=True,
can_delete=False, user=USERS[0])
        # we're querying here to ensure that even though we've created two
        # users above, we have successfully filtered down to just one perm.
self.assertEqual(1, GlobalPagePermission.objects.with_user(USERS[0]).count())
# this will confirm explicit permissions still work, by adding the first
# site instance to the many2many relationship 'sites'
GlobalPagePermission.objects.create(can_add=True, can_change=True,
can_delete=False,
user=USERS[1]).sites.add(SITES[0])
self.assertEqual(1, GlobalPagePermission.objects.with_user(USERS[1]).count())
homepage = create_page(title="master", template="nav_playground.html",
language="en", in_navigation=True, slug='/')
publish_page(page=homepage, user=superuser, language='en')
with self.settings(CMS_PERMISSION=True):
# for all users, they should have access to site 1
request = RequestFactory().get(path='/')
request.session = {'cms_admin_site': site_1.pk}
request.current_page = None
for user in USERS:
request.user = user
# Note, the query count is inflated by doing additional lookups
# because there's a site param in the request.
with self.assertNumQueries(FuzzyInt(3,4)):
# internally this calls PageAdmin.has_[add|change|delete]_permission()
self.assertEqual({'add': True, 'change': True, 'delete': False},
site._registry[Page].get_model_perms(request))
# can't use the above loop for this test, as we're testing that
# user 1 has access, but user 2 does not, as they are only assigned
# to site 1
request = RequestFactory().get(path='/')
request.session = {'cms_admin_site': site_2.pk}
request.current_page = None
# Refresh internal user cache
USERS[0] = self.reload(USERS[0])
USERS[1] = self.reload(USERS[1])
# As before, the query count is inflated by doing additional lookups
# because there's a site param in the request
with self.assertNumQueries(FuzzyInt(5, 15)):
# this user shouldn't have access to site 2
request.user = USERS[1]
self.assertEqual({'add': False, 'change': False, 'delete': False},
site._registry[Page].get_model_perms(request))
# but, going back to the first user, they should.
request = RequestFactory().get('/', data={'site__exact': site_2.pk})
request.user = USERS[0]
request.current_page = None
request.session = {}
self.assertEqual({'add': True, 'change': True, 'delete': False},
site._registry[Page].get_model_perms(request))
def test_has_page_add_permission_with_target(self):
page = create_page('Test', 'nav_playground.html', 'en')
user = self._create_user('user')
request = RequestFactory().get('/', data={'target': page.pk})
request.session = {}
request.user = user
has_perm = site._registry[Page].has_add_permission(request)
self.assertFalse(has_perm)
|
czpython/django-cms
|
cms/tests/test_permmod.py
|
Python
|
bsd-3-clause
| 44,347
|
from __future__ import print_function
import logging
import sys
class Error(Exception):
    def __init__(self, message, data=None):
        # avoid a shared mutable default argument
        self.message = message
        self.data = data if data is not None else {}
    def __str__(self):
        return self.message + ": " + repr(self.data)
    @staticmethod
    def die(code, error, message=None):
        # accept either an error-code string or an exception instance
        if isinstance(error, Exception):
            e = error
            error = '{0}.{1}'.format(type(e).__module__, type(e).__name__)
            message = str(e)
        print('Error: ' + error)
        if message:
            print(message)
        #logging.exception(message)
        sys.exit(code)
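# Hedged usage sketch (not part of the original module): `die` accepts
# either an error-code string or an exception instance, from which it
# derives the dotted error name and message before exiting.
if __name__ == '__main__':
    try:
        raise Error('domain not found', {'domain': 'example.com'})
    except Error as e:
        Error.die(2, e)  # prints the derived error name and message, then exits with status 2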
|
hiqdev/reppy
|
heppy/Error.py
|
Python
|
bsd-3-clause
| 580
|
# -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
def after_this_request(f):
"""Executes a function after this request. This is useful to modify
response objects. The function is passed the response object and has
to return the same or a new one.
Example::
@app.route('/')
def index():
@after_this_request
def add_header(response):
response.headers['X-Foo'] = 'Parachute'
return response
return 'Hello World!'
This is more useful if a function other than the view function wants to
modify a response. For instance think of a decorator that wants to add
some headers without converting the return value into a response object.
.. versionadded:: 0.9
"""
_request_ctx_stack.top._after_request_functions.append(f)
return f
def copy_current_request_context(f):
"""A helper function that decorates a function to retain the current
request context. This is useful when working with greenlets. The moment
the function is decorated a copy of the request context is created and
then pushed when the function is called.
Example::
import gevent
from flask import copy_current_request_context
@app.route('/')
def index():
@copy_current_request_context
def do_some_work():
# do some work here, it can access flask.request like you
# would otherwise in the view function.
...
gevent.spawn(do_some_work)
return 'Regular response'
.. versionadded:: 0.10
"""
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('This decorator can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
reqctx = top.copy()
def wrapper(*args, **kwargs):
with reqctx:
return f(*args, **kwargs)
return update_wrapper(wrapper, f)
def has_request_context():
"""If you have code that wants to test if a request context is there or
not this function can be used. For instance, you may want to take advantage
of request information if the request object is available, but fail
silently if it is unavailable.
::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and has_request_context():
remote_addr = request.remote_addr
self.remote_addr = remote_addr
Alternatively you can also just test any of the context bound objects
(such as :class:`request` or :class:`g` for truthness)::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and request:
remote_addr = request.remote_addr
self.remote_addr = remote_addr
.. versionadded:: 0.7
"""
return _request_ctx_stack.top is not None
def has_app_context():
"""Works like :func:`has_request_context` but for the application
context. You can also just do a boolean check on the
:data:`current_app` object instead.
.. versionadded:: 0.9
"""
return _app_ctx_stack.top is not None
class AppContext(object):
"""The application context binds an application object implicitly
to the current thread or greenlet, similar to how the
:class:`RequestContext` binds request information. The application
context is also implicitly created if a request context is created
but the application is not on top of the individual application
context.
"""
def __init__(self, app):
self.app = app
self.url_adapter = app.create_url_adapter(None)
self.g = app.app_ctx_globals_class()
# Like request context, app contexts can be pushed multiple times
# but there a basic "refcount" is enough to track them.
self._refcnt = 0
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
def pop(self, exc=None):
"""Pops the app context."""
self._refcnt -= 1
if self._refcnt <= 0:
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_appcontext(exc)
rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
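# Hedged usage sketch (not part of the original source): the application
# context is normally entered with a ``with`` block, which drives the
# push()/pop() refcounting above through __enter__/__exit__::
#
#     from flask import Flask, g
#     app = Flask(__name__)
#     with app.app_context():
#         g.answer = 42      # stored on this context's ``g`` object
#     # popped here; teardown_appcontext functions have run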
class RequestContext(object):
"""The request context contains all request relevant information. It is
created at the beginning of the request and pushed to the
`_request_ctx_stack` and removed at the end of it. It will create the
URL adapter and request object for the WSGI environment provided.
Do not attempt to use this class directly, instead use
:meth:`~flask.Flask.test_request_context` and
:meth:`~flask.Flask.request_context` to create this object.
When the request context is popped, it will evaluate all the
functions registered on the application for teardown execution
(:meth:`~flask.Flask.teardown_request`).
The request context is automatically popped at the end of the request
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
that did not fail and outside of `DEBUG` mode. By setting
``'flask._preserve_context'`` to `True` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
You might find this helpful for unittests where you need the
information from the context local around for a little longer. Make
sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
that situation, otherwise your unittests will leak memory.
"""
def __init__(self, app, environ, request=None):
self.app = app
if request is None:
request = app.request_class(environ)
self.request = request
self.url_adapter = app.create_url_adapter(self.request)
self.flashes = None
self.session = None
# Request contexts can be pushed multiple times and interleaved with
# other request contexts. Now only if the last level is popped we
# get rid of them. Additionally if an application context is missing
# one is created implicitly so for each level we add this information
self._implicit_app_ctx_stack = []
# indicator if the context was preserved. Next time another context
# is pushed the preserved context is popped.
self.preserved = False
# remembers the exception for pop if there is one in case the context
# preservation kicks in.
self._preserved_exc = None
# Functions that should be executed after the request on the response
# object. These will be called before the regular "after_request"
# functions.
self._after_request_functions = []
self.match_request()
# XXX: Support for deprecated functionality. This is going away with
# Flask 1.0
blueprint = self.request.blueprint
if blueprint is not None:
# better safe than sorry, we don't want to break code that
# already worked
bp = app.blueprints.get(blueprint)
if bp is not None and blueprint_is_module(bp):
self.request._is_old_module = True
def _get_g(self):
return _app_ctx_stack.top.g
def _set_g(self, value):
_app_ctx_stack.top.g = value
g = property(_get_g, _set_g)
del _get_g, _set_g
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
)
def match_request(self):
"""Can be overridden by a subclass to hook into the matching
of the request.
"""
try:
url_rule, self.request.view_args = \
self.url_adapter.match(return_rule=True)
self.request.url_rule = url_rule
except HTTPException as e:
self.request.routing_exception = e
def push(self):
"""Binds the request context to the current context."""
# If an exception occurs in debug mode or if context preservation is
# activated under exception situations exactly one context stays
# on the stack. The rationale is that you want to access that
# information under debug situations. However if someone forgets to
# pop that context again we want to make sure that on the next push
# it's invalidated, otherwise we run at risk that something leaks
# memory. This is usually only a problem in testsuite since this
# functionality is not active in production environments.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop(top._preserved_exc)
# Before we push the request context we have to ensure that there
# is an application context.
app_ctx = _app_ctx_stack.top
if app_ctx is None or app_ctx.app != self.app:
app_ctx = self.app.app_context()
app_ctx.push()
self._implicit_app_ctx_stack.append(app_ctx)
else:
self._implicit_app_ctx_stack.append(None)
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_request_ctx_stack.push(self)
# Open the session at the moment that the request context is
# available. This allows a custom open_session method to use the
# request context (e.g. code that access database information
# stored on `g` instead of the appcontext).
self.session = self.app.open_session(self.request)
if self.session is None:
self.session = self.app.make_null_session()
def pop(self, exc=None):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
.. versionchanged:: 0.9
Added the `exc` argument.
"""
app_ctx = self._implicit_app_ctx_stack.pop()
clear_request = False
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_request(exc)
# If this interpreter supports clearing the exception information
# we do that now. This will only go into effect on Python 2.x,
# on 3.x it disappears automatically at the end of the exception
# stack.
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
request_close = getattr(self.request, 'close', None)
if request_close is not None:
request_close()
clear_request = True
rv = _request_ctx_stack.pop()
assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
% (rv, self)
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ['werkzeug.request'] = None
# Get rid of the app as well if necessary.
if app_ctx is not None:
app_ctx.pop(exc)
def auto_pop(self, exc):
if self.request.environ.get('flask._preserve_context') or \
(exc is not None and self.app.preserve_context_on_exception):
self.preserved = True
self._preserved_exc = exc
else:
self.pop(exc)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
# do not pop the request stack if we are in debug mode and an
# exception happened. This will allow the debugger to still
# access the request object in the interactive shell. Furthermore
# the context can be force kept alive for the test client.
# See flask.testing for how this works.
self.auto_pop(exc_value)
def __repr__(self):
return '<%s \'%s\' [%s] of %s>' % (
self.__class__.__name__,
self.request.url,
self.request.method,
self.app.name,
)
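# Hedged usage sketch (not part of the original source): request contexts
# are created through the Flask helpers rather than instantiated directly;
# ``test_request_context`` builds one from a synthetic WSGI environ::
#
#     from flask import Flask, request
#     app = Flask(__name__)
#     with app.test_request_context('/?name=World'):
#         assert request.args['name'] == 'World'
#     # auto_pop() runs via __exit__, tearing down the request context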
|
MjAbuz/flask
|
flask/ctx.py
|
Python
|
bsd-3-clause
| 14,399
|
# -*- coding: utf-8 -*-
from gevent import Greenlet
from gevent import sleep
from .base import SchedulerMixin
class Scheduler(SchedulerMixin, Greenlet):
"""
Gevent scheduler. Only replaces the sleep method for correct
context switching.
"""
def sleep(self, seconds):
sleep(seconds)
def return_callback(self, *args):
return self.callback(*args)
def _run(self):
self.start_loop()
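# Hedged usage note (not part of the original source): as a Greenlet
# subclass, the scheduler is started with .start(), which causes the
# gevent hub to invoke _run() and hence SchedulerMixin.start_loop();
# constructor arguments come from SchedulerMixin and are not shown here.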
|
niwinz/django-greenqueue
|
greenqueue/scheduler/gevent_scheduler.py
|
Python
|
bsd-3-clause
| 437
|
from __future__ import absolute_import, division, print_function
try:
    from collections.abc import Iterator  # moved here in Python 3.3+
except ImportError:  # pragma: no cover - Python 2 fallback
    from collections import Iterator
from flask import Flask, request, jsonify, json
from functools import partial, wraps
from .index import parse_index
class Server(object):
__slots__ = 'app', 'datasets'
def __init__(self, name='Blaze-Server', datasets=None):
app = self.app = Flask(name)
self.datasets = datasets or dict()
for args, kwargs, func in routes:
func2 = wraps(func)(partial(func, self.datasets))
app.route(*args, **kwargs)(func2)
def __getitem__(self, key):
return self.datasets[key]
def __setitem__(self, key, value):
self.datasets[key] = value
return value
routes = list()
def route(*args, **kwargs):
def f(func):
routes.append((args, kwargs, func))
return func
return f
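# Hedged note (not part of the original source): ``route`` only records
# (args, kwargs, func) tuples; Server.__init__ later binds each recorded
# function to the Flask app with the datasets dict pre-applied, e.g.::
#
#     @route('/ping')            # hypothetical endpoint for illustration
#     def ping(datasets):
#         return 'pong'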
@route('/datasets.json')
def dataset(datasets):
return jsonify(dict((k, str(v.dshape)) for k, v in datasets.items()))
@route('/data/<name>.json', methods=['POST', 'PUT', 'GET'])
def data(datasets, name):
""" Basic indexing API
Allows remote indexing of datasets. Takes indexing data as JSON
Takes requests like
Example
-------
For the following array:
[['Alice', 100],
['Bob', 200],
['Charlie', 300]]
schema = '{name: string, amount: int32}'
And the following
url: /data/table-name.json
POST-data: {'index': [{'start': 0, 'step': 3}, 'name']}
and returns responses like
{"name": "table-name",
"index": [0, "name"],
"datashape": "3 * string",
"data": ["Alice", "Bob", "Charlie"]}
"""
if request.headers['content-type'] != 'application/json':
return ("Expected JSON data", 404)
try:
data = json.loads(request.data)
except ValueError:
return ("Bad JSON. Got %s " % request.data, 404)
try:
dset = datasets[name]
except KeyError:
return ("Dataset %s not found" % name, 404)
try:
index = parse_index(data['index'])
except ValueError:
return ("Bad index", 404)
try:
rv = dset.py[index]
except RuntimeError:
return ("Bad index: %s" % (str(index)), 404)
if isinstance(rv, Iterator):
rv = list(rv)
return jsonify({'name': name,
'index': data['index'],
'datashape': str(dset.dshape.subshape[index]),
'data': rv})
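# Hedged usage sketch (not part of the original source); assumes a
# blaze-style dataset object exposing ``dshape`` and ``py[...]`` indexing
# (``accounts`` below is hypothetical)::
#
#     from blaze.serve.server import Server
#     server = Server(datasets={'accounts': accounts})
#     server.app.run(port=5000)
#     # GET  /datasets.json       -> {"accounts": "<its datashape>"}
#     # POST /data/accounts.json  -> indexed data, per the docstring above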
|
aterrel/blaze
|
blaze/serve/server.py
|
Python
|
bsd-3-clause
| 2,462
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import time
import os.path
from collections import OrderedDict, namedtuple
import gevent
import flask
from digits import device_query
from digits.task import Task
from digits.utils import subclass, override
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 2
# Used to store network outputs
NetworkOutput = namedtuple('NetworkOutput', ['kind', 'data'])
@subclass
class TrainTask(Task):
"""
Defines required methods for child classes
"""
def __init__(self, dataset, train_epochs, snapshot_interval, learning_rate, lr_policy, **kwargs):
"""
Arguments:
dataset -- a DatasetJob containing the dataset for this model
train_epochs -- how many epochs of training data to train on
snapshot_interval -- how many epochs between taking a snapshot
learning_rate -- the base learning rate
lr_policy -- a hash of options to be used for the learning rate policy
Keyword arguments:
gpu_count -- how many GPUs to use for training (integer)
selected_gpus -- a list of GPU indexes to be used for training
batch_size -- if set, override any network specific batch_size with this value
val_interval -- how many epochs between validating the model with an epoch of validation data
pretrained_model -- filename for a model to use for fine-tuning
crop_size -- crop each image down to a square of this size
use_mean -- subtract the dataset's mean file or mean pixel
random_seed -- optional random seed
"""
self.gpu_count = kwargs.pop('gpu_count', None)
self.selected_gpus = kwargs.pop('selected_gpus', None)
self.batch_size = kwargs.pop('batch_size', None)
self.val_interval = kwargs.pop('val_interval', None)
self.pretrained_model = kwargs.pop('pretrained_model', None)
self.crop_size = kwargs.pop('crop_size', None)
self.use_mean = kwargs.pop('use_mean', None)
self.random_seed = kwargs.pop('random_seed', None)
self.solver_type = kwargs.pop('solver_type', None)
self.shuffle = kwargs.pop('shuffle', None)
self.network = kwargs.pop('network', None)
self.framework_id = kwargs.pop('framework_id', None)
super(TrainTask, self).__init__(**kwargs)
self.pickver_task_train = PICKLE_VERSION
self.dataset = dataset
self.train_epochs = train_epochs
self.snapshot_interval = snapshot_interval
self.learning_rate = learning_rate
self.lr_policy = lr_policy
self.current_epoch = 0
self.snapshots = []
# data gets stored as dicts of lists (for graphing)
self.train_outputs = OrderedDict()
self.val_outputs = OrderedDict()
def __getstate__(self):
state = super(TrainTask, self).__getstate__()
if 'dataset' in state:
del state['dataset']
if 'snapshots' in state:
del state['snapshots']
if '_labels' in state:
del state['_labels']
if '_gpu_socketio_thread' in state:
del state['_gpu_socketio_thread']
return state
def __setstate__(self, state):
if state['pickver_task_train'] < 2:
state['train_outputs'] = OrderedDict()
state['val_outputs'] = OrderedDict()
tl = state.pop('train_loss_updates', None)
vl = state.pop('val_loss_updates', None)
va = state.pop('val_accuracy_updates', None)
lr = state.pop('lr_updates', None)
if tl:
state['train_outputs']['epoch'] = NetworkOutput('Epoch', [x[0] for x in tl])
state['train_outputs']['loss'] = NetworkOutput('SoftmaxWithLoss', [x[1] for x in tl])
state['train_outputs']['learning_rate'] = NetworkOutput('LearningRate', [x[1] for x in lr])
if vl:
state['val_outputs']['epoch'] = NetworkOutput('Epoch', [x[0] for x in vl])
if va:
state['val_outputs']['accuracy'] = NetworkOutput('Accuracy', [x[1]/100 for x in va])
state['val_outputs']['loss'] = NetworkOutput('SoftmaxWithLoss', [x[1] for x in vl])
if state['use_mean'] == True:
state['use_mean'] = 'pixel'
elif state['use_mean'] == False:
state['use_mean'] = 'none'
state['pickver_task_train'] = PICKLE_VERSION
super(TrainTask, self).__setstate__(state)
self.snapshots = []
self.dataset = None
@override
def offer_resources(self, resources):
if 'gpus' not in resources:
return None
if not resources['gpus']:
return {} # don't use a GPU at all
if self.gpu_count is not None:
identifiers = []
for resource in resources['gpus']:
if resource.remaining() >= 1:
identifiers.append(resource.identifier)
if len(identifiers) == self.gpu_count:
break
if len(identifiers) == self.gpu_count:
return {'gpus': [(i, 1) for i in identifiers]}
else:
return None
elif self.selected_gpus is not None:
all_available = True
for i in self.selected_gpus:
available = False
for gpu in resources['gpus']:
if i == gpu.identifier:
if gpu.remaining() >= 1:
available = True
break
if not available:
all_available = False
break
if all_available:
return {'gpus': [(i, 1) for i in self.selected_gpus]}
else:
return None
return None
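    # Hedged worked example (not part of the original source): with
    # self.gpu_count == 2 and an offer containing three free GPUs, the
    # first two identifiers are claimed at one unit each:
    #   offer_resources({'gpus': [gpu0, gpu1, gpu2]})
    #   -> {'gpus': [(gpu0.identifier, 1), (gpu1.identifier, 1)]}
    # If fewer than gpu_count GPUs are free, None is returned and the
    # offer is declined.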
@override
def before_run(self):
if 'gpus' in self.current_resources:
# start a thread which sends SocketIO updates about GPU utilization
self._gpu_socketio_thread = gevent.spawn(
self.gpu_socketio_updater,
[identifier for (identifier, value)
in self.current_resources['gpus']]
)
def gpu_socketio_updater(self, gpus):
"""
This thread sends SocketIO messages about GPU utilization
to connected clients
Arguments:
gpus -- a list of identifiers for the GPUs currently being used
"""
from digits.webapp import app, socketio
devices = []
for index in gpus:
device = device_query.get_device(index)
if device:
devices.append((index, device))
if not devices:
raise RuntimeError('Failed to load gpu information for "%s"' % gpus)
# this thread continues until killed in after_run()
while True:
data = []
for index, device in devices:
update = {'name': device.name, 'index': index}
nvml_info = device_query.get_nvml_info(index)
if nvml_info is not None:
update.update(nvml_info)
data.append(update)
with app.app_context():
html = flask.render_template('models/gpu_utilization.html',
data = data)
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'gpu_utilization',
'html': html,
},
namespace='/jobs',
room=self.job_id,
)
gevent.sleep(1)
def send_progress_update(self, epoch):
"""
Sends socketio message about the current progress
"""
if self.current_epoch == epoch:
return
self.current_epoch = epoch
self.progress = epoch/self.train_epochs
self.emit_progress_update()
def save_train_output(self, *args):
"""
Save output to self.train_outputs
"""
from digits.webapp import socketio
if not self.save_output(self.train_outputs, *args):
return
if self.last_train_update and (time.time() - self.last_train_update) < 5:
return
self.last_train_update = time.time()
self.logger.debug('Training %s%% complete.' % round(100 * self.current_epoch/self.train_epochs,2))
# loss graph data
data = self.combined_graph_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'combined_graph',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
if data['columns']:
# isolate the Loss column data for the sparkline
graph_data = data['columns'][0][1:]
socketio.emit('task update',
{
'task': self.html_id(),
'job_id': self.job_id,
'update': 'combined_graph',
'data': graph_data,
},
namespace='/jobs',
room='job_management',
)
# lr graph data
data = self.lr_graph_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'lr_graph',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
def save_val_output(self, *args):
"""
Save output to self.val_outputs
"""
from digits.webapp import socketio
if not self.save_output(self.val_outputs, *args):
return
# loss graph data
data = self.combined_graph_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'combined_graph',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
def save_output(self, d, name, kind, value):
"""
Save output to self.train_outputs or self.val_outputs
Returns true if all outputs for this epoch have been added
Arguments:
d -- the dictionary where the output should be stored
name -- name of the output (e.g. "accuracy")
kind -- the type of outputs (e.g. "Accuracy")
value -- value for this output (e.g. 0.95)
"""
# don't let them be unicode
name = str(name)
kind = str(kind)
# update d['epoch']
if 'epoch' not in d:
d['epoch'] = NetworkOutput('Epoch', [self.current_epoch])
elif d['epoch'].data[-1] != self.current_epoch:
d['epoch'].data.append(self.current_epoch)
if name not in d:
d[name] = NetworkOutput(kind, [])
epoch_len = len(d['epoch'].data)
name_len = len(d[name].data)
# save to back of d[name]
if name_len > epoch_len:
raise Exception('Received a new output without being told the new epoch')
elif name_len == epoch_len:
# already exists
if isinstance(d[name].data[-1], list):
d[name].data[-1].append(value)
else:
d[name].data[-1] = [d[name].data[-1], value]
elif name_len == epoch_len - 1:
# expected case
d[name].data.append(value)
else:
# we might have missed one
for _ in xrange(epoch_len - name_len - 1):
d[name].data.append(None)
d[name].data.append(value)
for key in d:
if key not in ['epoch', 'learning_rate']:
if len(d[key].data) != epoch_len:
return False
return True
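# Illustrative sketch (not part of the original source), assuming
# self.current_epoch == 1 and d starts empty:
#   save_output(d, 'loss', 'SoftmaxWithLoss', 0.9)  # d['loss'].data == [0.9]
#   save_output(d, 'loss', 'SoftmaxWithLoss', 0.8)  # last entry becomes [0.9, 0.8]
# A second value for the same epoch is folded into a list rather than
# advancing the epoch axis.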
@override
def after_run(self):
if hasattr(self, '_gpu_socketio_thread'):
self._gpu_socketio_thread.kill()
def detect_snapshots(self):
"""
Populate self.snapshots with snapshots that exist on disk
Returns True if at least one usable snapshot is found
"""
return False
def snapshot_list(self):
"""
Returns an array of arrays for creating an HTML select field
"""
return [[s[1], 'Epoch #%s' % s[1]] for s in reversed(self.snapshots)]
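# e.g. (sketch, assuming snapshots holds (path, epoch) tuples):
#   snapshots == [('snapshot_iter_100.caffemodel', 1),
#                 ('snapshot_iter_200.caffemodel', 2)]
#   -> [[2, 'Epoch #2'], [1, 'Epoch #1']]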
def est_next_snapshot(self):
"""
Returns the estimated time in seconds until the next snapshot is taken
"""
return None
def can_view_weights(self):
"""
Returns True if this Task can visualize the weights of each layer for a given model
"""
raise NotImplementedError()
def view_weights(self, model_epoch=None, layers=None):
"""
View the weights for a specific model and layer[s]
"""
return None
def can_infer_one(self):
"""
Returns True if this Task can run inference on one input
"""
raise NotImplementedError()
def can_view_activations(self):
"""
Returns True if this Task can visualize the activations of a model after inference
"""
raise NotImplementedError()
def infer_one(self, data, model_epoch=None, layers=None):
"""
Run inference on one input
"""
return None
def can_infer_many(self):
"""
Returns True if this Task can run inference on many inputs
"""
raise NotImplementedError()
def infer_many(self, data, model_epoch=None):
"""
Run inference on many inputs
"""
return None
def get_labels(self):
"""
Read labels from labels_file and return them in a list
"""
# The labels might be set already
if getattr(self, '_labels', None):
return self._labels
assert hasattr(self.dataset, 'labels_file'), 'labels_file not set'
assert self.dataset.labels_file, 'labels_file not set'
assert os.path.exists(self.dataset.path(self.dataset.labels_file)), 'labels_file does not exist'
labels = []
with open(self.dataset.path(self.dataset.labels_file)) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels) > 0, 'no labels in labels_file'
self._labels = labels
return self._labels
def lr_graph_data(self):
"""
Returns learning rate data formatted for a C3.js graph
"""
if not self.train_outputs or 'epoch' not in self.train_outputs or 'learning_rate' not in self.train_outputs:
return None
# return 100-200 values or fewer
stride = max(len(self.train_outputs['epoch'].data)/100,1)
e = ['epoch'] + self.train_outputs['epoch'].data[::stride]
lr = ['lr'] + self.train_outputs['learning_rate'].data[::stride]
return {
'columns': [e, lr],
'xs': {
'lr': 'epoch'
},
'names': {
'lr': 'Learning Rate'
},
}
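# Illustrative return value (sketch, values invented):
# {'columns': [['epoch', 0, 1, 2], ['lr', 0.01, 0.01, 0.001]],
#  'xs': {'lr': 'epoch'}, 'names': {'lr': 'Learning Rate'}}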
def combined_graph_data(self, cull=True):
"""
Returns all train/val outputs in data for one C3.js graph
Keyword arguments:
cull -- if True, cut down the number of data points returned to a reasonable size
"""
data = {
'columns': [],
'xs': {},
'axes': {},
'names': {},
}
added_train_data = False
added_val_data = False
if self.train_outputs and 'epoch' in self.train_outputs:
if cull:
# max 200 data points
stride = max(len(self.train_outputs['epoch'].data)/100,1)
else:
# return all data
stride = 1
for name, output in self.train_outputs.iteritems():
if name not in ['epoch', 'learning_rate']:
col_id = '%s-train' % name
data['xs'][col_id] = 'train_epochs'
data['names'][col_id] = '%s (train)' % name
if 'accuracy' in output.kind.lower():
data['columns'].append([col_id] + [100*x for x in output.data[::stride]])
data['axes'][col_id] = 'y2'
else:
data['columns'].append([col_id] + output.data[::stride])
added_train_data = True
if added_train_data:
data['columns'].append(['train_epochs'] + self.train_outputs['epoch'].data[::stride])
if self.val_outputs and 'epoch' in self.val_outputs:
if cull:
# max 200 data points
stride = max(len(self.val_outputs['epoch'].data)/100,1)
else:
# return all data
stride = 1
for name, output in self.val_outputs.iteritems():
if name not in ['epoch']:
col_id = '%s-val' % name
data['xs'][col_id] = 'val_epochs'
data['names'][col_id] = '%s (val)' % name
if 'accuracy' in output.kind.lower():
data['columns'].append([col_id] + [100*x for x in output.data[::stride]])
data['axes'][col_id] = 'y2'
else:
data['columns'].append([col_id] + output.data[::stride])
added_val_data = True
if added_val_data:
data['columns'].append(['val_epochs'] + self.val_outputs['epoch'].data[::stride])
if added_train_data:
return data
else:
# return None if only validation data exists
# helps with ordering of columns in graph
return None
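# Illustrative shape of the returned dict (sketch, values invented):
# {
#     'columns': [['loss-train', 1.2, 0.8], ['train_epochs', 1, 2]],
#     'xs': {'loss-train': 'train_epochs'},
#     'axes': {},
#     'names': {'loss-train': 'loss (train)'},
# }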
# return id of framework used for training
@override
def get_framework_id(self):
return self.framework_id
def get_model_files(self):
"""
return path to model file
"""
raise NotImplementedError()
def get_network_desc(self):
"""
return text description of model
"""
raise NotImplementedError()
|
batra-mlp-lab/DIGITS
|
digits/model/tasks/train.py
|
Python
|
bsd-3-clause
| 19,069
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['Lag1Trend'] , ['Seasonal_WeekOfYear'] , ['SVR'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_Lag1Trend_Seasonal_WeekOfYear_SVR.py
|
Python
|
bsd-3-clause
| 161
|
from setuptools import setup, find_packages
from orotangi import __version__ as version
install_requires = [
'Django==1.11.18',
'djangorestframework==3.6.2',
'django-cors-headers==2.0.2',
'django-filter==1.0.2',
'python-dateutil==2.6.0'
]
setup(
name='orotangi',
version=version,
description='Your Thoughts, Everywhere',
author='FoxMaSk',
maintainer='FoxMaSk',
author_email='foxmask@trigger-happy.eu',
maintainer_email='foxmask@trigger-happy.eu',
url='https://github.com/foxmask/orotangi',
download_url="https://github.com/foxmask/orotangi/"
"archive/orotangi-" + version + ".zip",
packages=find_packages(exclude=['orotangi/local_settings']),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Topic :: Internet',
'Topic :: Communications',
'Topic :: Database',
],
install_requires=install_requires,
include_package_data=True,
)
|
foxmask/orotangi
|
setup.py
|
Python
|
bsd-3-clause
| 1,297
|
# -*- coding: utf-8 -*-
#
# zhuyin_table.py
# cjktools
#
"""
An interface to the zhuyin <-> pinyin table.
"""
from functools import partial
from . import cjkdata
from cjktools.common import get_stream_context, stream_codec
def _default_stream():
return open(cjkdata.get_resource('tables/zhuyin_pinyin_conv_table'))
_get_stream_context = partial(get_stream_context, _default_stream)
def parse_lines(istream):
istream = stream_codec(istream)
for line in istream:
if not line.startswith('#'):
yield line.rstrip().split()
def zhuyin_to_pinyin_table(istream=None):
""" Returns a dictionary mapping zhuyin to pinyin. """
with _get_stream_context(istream) as stream:
table = {}
for zhuyin, pinyin in parse_lines(stream):
table[zhuyin] = pinyin
return table
def pinyin_to_zhuyin_table(istream=None):
""" Returns a dictionary mapping zhuyin to pinyin. """
with _get_stream_context(istream) as istream:
table = {}
for zhuyin, pinyin in parse_lines(istream):
table[pinyin] = zhuyin
return table
def get_all_pinyin(istream=None):
""" Returns a list of all pinyin """
with _get_stream_context(istream) as istream:
all_pinyin = ['r']
for zhuyin, pinyin in parse_lines(istream):
all_pinyin.append(pinyin)
return all_pinyin
def pinyin_regex_pattern(istream=None):
""" Returns a pinyin regex pattern, with optional tone number. """
all_pinyin = get_all_pinyin(istream)
# Sort from longest to shortest, so as to make maximum matches whenever
# possible.
all_pinyin = sorted(all_pinyin, key=len, reverse=True)
# Build a generic pattern for a single pinyin with an optional tone.
pattern = '(%s)([0-5]?)' % '|'.join(all_pinyin)
return pattern
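# Illustrative usage (sketch, assuming 'ni' and 'hao' appear in the table):
#   import re
#   re.findall(pinyin_regex_pattern(), 'ni3hao3')  # -> [('ni', '3'), ('hao', '3')]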
def zhuyin_regex_pattern(istream=None):
""" Returns a zhuyin regex pattern. """
with _get_stream_context(istream) as istream:
all_pinyin = []
for zhuyin, pinyin in parse_lines(istream):
all_pinyin.append(pinyin)
pattern = '(%s)[0-4]?' % '|'.join(all_pinyin)
return pattern
|
larsyencken/cjktools
|
cjktools/resources/zhuyin_table.py
|
Python
|
bsd-3-clause
| 2,155
|
import tests.periodicities.period_test as per
per.buildModel((24 , 'BH' , 50));
|
antoinecarme/pyaf
|
tests/periodicities/Business_Hour/Cycle_Business_Hour_50_BH_24.py
|
Python
|
bsd-3-clause
| 82
|
from django.http import HttpRequest
import mock
import pytest
from nose.tools import assert_false
from olympia import amo
from olympia.amo.tests import TestCase, req_factory_factory
from olympia.amo.urlresolvers import reverse
from olympia.addons.models import Addon, AddonUser
from olympia.users.models import UserProfile
from .acl import (action_allowed, check_addon_ownership, check_ownership,
check_addons_reviewer, check_personas_reviewer,
check_unlisted_addons_reviewer, is_editor, match_rules)
pytestmark = pytest.mark.django_db
def test_match_rules():
"""
Unit tests for the match_rules method.
"""
rules = (
'*:*',
'Editors:*,Admin:EditAnyAddon,Admin:flagged,Admin:addons,'
'Admin:EditAnyCollection',
'Tests:*,Admin:serverstatus,Admin:users',
'Admin:EditAnyAddon,Admin:EditAnyLocale,Editors:*,'
'Admin:lists,Admin:applications,Admin:addons,Localizers:*',
'Admin:EditAnyAddon',
'Admin:ViewAnyStats,Admin:ViewAnyCollectionStats',
'Admin:ViewAnyStats',
'Editors:*,Admin:features',
'Admin:Statistics',
'Admin:Features,Editors:*',
'Admin:%',
'Admin:*',
'Admin:Foo',
'Admin:Bar',
)
for rule in rules:
assert match_rules(rule, 'Admin', '%'), "%s != Admin:%%" % rule
rules = (
'Doctors:*',
'Stats:View',
'CollectionStats:View',
'Addons:Review',
'Personas:Review',
'Locales:Edit',
'Locale.de:Edit',
'Reviews:Edit',
'None:None',
)
for rule in rules:
assert not match_rules(rule, 'Admin', '%'), \
"%s == Admin:%% and shouldn't" % rule
def test_anonymous_user():
# Fake request must not have .groups, just like an anonymous user.
fake_request = HttpRequest()
assert_false(action_allowed(fake_request, amo.FIREFOX, 'Admin:%'))
class ACLTestCase(TestCase):
"""Test some basic ACLs by going to various locked pages on AMO."""
fixtures = ['access/login.json']
def test_admin_login_anon(self):
# Login form for anonymous user on the admin page.
url = '/en-US/admin/models/'
r = self.client.get(url)
self.assert3xx(r, '%s?to=%s' % (reverse('users.login'), url))
class TestHasPerm(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestHasPerm, self).setUp()
assert self.client.login(username='del@icio.us', password='password')
self.user = UserProfile.objects.get(email='del@icio.us')
self.addon = Addon.objects.get(id=3615)
self.au = AddonUser.objects.get(addon=self.addon, user=self.user)
assert self.au.role == amo.AUTHOR_ROLE_OWNER
self.request = self.fake_request_with_user(self.user)
def fake_request_with_user(self, user):
request = mock.Mock()
request.groups = user.groups.all()
request.user = user
request.user.is_authenticated = mock.Mock(return_value=True)
return request
def login_admin(self):
assert self.client.login(username='admin@mozilla.com',
password='password')
return UserProfile.objects.get(email='admin@mozilla.com')
def test_anonymous(self):
self.request.user.is_authenticated.return_value = False
self.client.logout()
assert not check_addon_ownership(self.request, self.addon)
def test_admin(self):
self.request = self.fake_request_with_user(self.login_admin())
assert check_addon_ownership(self.request, self.addon)
assert check_addon_ownership(self.request, self.addon, admin=True)
assert not check_addon_ownership(self.request, self.addon, admin=False)
def test_require_author(self):
assert check_ownership(self.request, self.addon, require_author=True)
def test_require_author_when_admin(self):
self.request = self.fake_request_with_user(self.login_admin())
self.request.groups = self.request.user.groups.all()
assert check_ownership(self.request, self.addon, require_author=False)
assert not check_ownership(self.request, self.addon,
require_author=True)
def test_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert not check_addon_ownership(self.request, self.addon)
self.test_admin()
def test_deleted(self):
self.addon.update(status=amo.STATUS_DELETED)
assert not check_addon_ownership(self.request, self.addon)
self.request.user = self.login_admin()
self.request.groups = self.request.user.groups.all()
assert not check_addon_ownership(self.request, self.addon)
def test_ignore_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert check_addon_ownership(self.request, self.addon,
ignore_disabled=True)
def test_owner(self):
assert check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
def test_dev(self):
assert check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert not check_addon_ownership(self.request, self.addon, dev=True)
def test_viewer(self):
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
def test_support(self):
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert not check_addon_ownership(self.request, self.addon,
support=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon,
support=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert check_addon_ownership(self.request, self.addon, support=True)
class TestCheckReviewer(TestCase):
fixtures = ['base/addon_3615', 'addons/persona']
def setUp(self):
super(TestCheckReviewer, self).setUp()
self.user = UserProfile.objects.get()
self.persona = Addon.objects.get(pk=15663)
self.addon = Addon.objects.get(pk=3615)
def test_no_perm(self):
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_perm_addons(self):
self.grant_permission(self.user, 'Addons:Review')
req = req_factory_factory('noop', user=self.user)
assert check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_perm_themes(self):
self.grant_permission(self.user, 'Personas:Review')
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert check_personas_reviewer(req)
def test_perm_unlisted_addons(self):
self.grant_permission(self.user, 'Addons:ReviewUnlisted')
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_is_editor_for_addon_reviewer(self):
"""An addon editor is also a persona editor."""
self.grant_permission(self.user, 'Addons:Review')
req = req_factory_factory('noop', user=self.user)
assert is_editor(req, self.persona)
assert is_editor(req, self.addon)
def test_is_editor_for_persona_reviewer(self):
self.grant_permission(self.user, 'Personas:Review')
req = req_factory_factory('noop', user=self.user)
assert is_editor(req, self.persona)
assert not is_editor(req, self.addon)
|
jpetto/olympia
|
src/olympia/access/tests.py
|
Python
|
bsd-3-clause
| 9,223
|
import collections
import difflib
import inspect
import logging
import os.path
import warnings
import os
import importlib
import cherrypy
import yaml
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
json = None
for pkg in ['ujson', 'yajl', 'simplejson', 'cjson', 'json']:
try:
json = importlib.import_module(pkg)
except:
pass
else:
break
from blueberrypy.email import Mailer
from blueberrypy.exc import (BlueberryPyNotConfiguredError,
BlueberryPyConfigurationError)
logger = logging.getLogger(__name__)
class BlueberryPyConfiguration(object):
class _YAMLLoader(Loader):
"""YAML loader supporting additional tags."""
def __init__(self, *args, **kwargs):
super(BlueberryPyConfiguration._YAMLLoader, self).__init__(*args, **kwargs)
self._setup_loader()
def register_tag(self, tag, callback):
yaml.add_constructor(tag, callback, Loader=self.__class__)
def _tag_env_var(self, loader, node):
env_var_name = loader.construct_scalar(node)
return os.getenv(env_var_name)
def _tag_first_of(self, loader, node):
seq = loader.construct_sequence(node)
for v in seq:
if v is not None:
return v
raise yaml.YAMLError('At least one of the values passed to the !FirstOf tag must not be None')
def _setup_loader(self):
self.register_tag('!EnvVar', self._tag_env_var)
self.register_tag('!FirstOf', self._tag_first_of)
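# Illustrative YAML using the custom tags (sketch; the key names are
# hypothetical, not taken from the original project):
#
#   sqlalchemy_engine:
#     url: !FirstOf [!EnvVar DATABASE_URL, 'sqlite:///dev.db']
#
# !EnvVar resolves to os.getenv('DATABASE_URL'); !FirstOf then yields the
# first non-None value, so the literal acts as a fallback.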
def __init__(self, config_dir=None, app_config=None, logging_config=None,
webassets_env=None, environment=None,
env_var_name='BLUEBERRYPY_CONFIG'):
"""Loads BlueberryPy configuration from `config_dir` if supplied.
If `app_config` or `logging_config` or `webassets_env` are given, they
will be used instead of the configuration files found from `config_dir`.
If `environment` is given, it must be an existing CherryPy environment.
If `environment` is `production`, and `config_dir` is given, the `prod`
subdirectory will be searched for configuration files, otherwise the
`dev` subdirectory will be searched.
If `env_var_name` is given, it names an environment variable whose
JSON value overrides values from the YAML config.
Upon initialization of this configuration object, all the configuration
will be validated for sanity and either BlueberryPyConfigurationError or
BlueberryPyNotConfiguredError will be thrown if insane. For less severe
configuration insanity cases, a warning will be emitted instead.
:arg config_dir: a path, str
:arg app_config: a CherryPy config, dict
:arg logging_config: a logging config, dict
:arg webassets_env: a webassets environment, webassets.Environment
:arg environment: a CherryPy configuration environment, str
:arg env_var_name: an environment variable name for configuration, str
"""
ENV_CONFIG = self.__class__._load_env_var(env_var_name)
CWD = os.getcwdu() if getattr(os, "getcwdu", None) else os.getcwd()
if ENV_CONFIG.get('global', {}).get('CWD') and \
os.path.isdir(
os.path.join(ENV_CONFIG['global']['CWD'], 'src')):
CWD = ENV_CONFIG['global']['CWD']
if config_dir is None:
self.config_dir = config_dir = os.path.join(CWD, "config")
else:
self.config_dir = config_dir = os.path.abspath(config_dir)
if environment == "production":
self.config_dir = config_dir = os.path.join(config_dir, "prod")
elif environment == "test_suite" and os.path.exists(os.path.join(config_dir, "test")):
self.config_dir = config_dir = os.path.join(config_dir, "test")
else:
self.config_dir = config_dir = os.path.join(config_dir, "dev")
config_file_paths = {}
app_yml_path = os.path.join(config_dir, "app.yml")
logging_yml_path = os.path.join(config_dir, "logging.yml")
bundles_yml_path = os.path.join(config_dir, "bundles.yml")
# A local-only config, which overrides the app.yml values
app_override_yml_path = os.path.join(config_dir, "app.override.yml")
if os.path.exists(app_yml_path):
config_file_paths["app_yml"] = app_yml_path
if os.path.exists(logging_yml_path):
config_file_paths["logging_yml"] = logging_yml_path
if os.path.exists(bundles_yml_path):
config_file_paths["bundles_yml"] = bundles_yml_path
if os.path.exists(app_override_yml_path):
config_file_paths["app_override_yml"] = app_override_yml_path
self._config_file_paths = config_file_paths
if "app_yml" in config_file_paths and not app_config:
with open(config_file_paths["app_yml"]) as app_yml:
self._app_config = load(app_yml, self._YAMLLoader)
# If the overrides file exists, override the app config values
# with ones from app.override.yml
if "app_override_yml" in config_file_paths:
app_override_config = {}
with open(config_file_paths["app_override_yml"]) as app_override_yml:
app_override_config = load(app_override_yml, self._YAMLLoader)
self._app_config = self.__class__.merge_dicts(
self._app_config,
app_override_config
)
if "logging_yml" in config_file_paths and not logging_config:
with open(config_file_paths["logging_yml"]) as logging_yml:
self._logging_config = load(logging_yml, self._YAMLLoader)
if "bundles_yml" in config_file_paths and not webassets_env:
from webassets.loaders import YAMLLoader
self._webassets_env = YAMLLoader(config_file_paths["bundles_yml"]).load_environment()
if app_config:
self._app_config = dict(app_config)
try:
# Merge JSON from environment variable
self._app_config = self.__class__.merge_dicts(self._app_config, ENV_CONFIG)
except AttributeError:
if ENV_CONFIG: # not an empty dict
self._app_config = ENV_CONFIG
# Don't re-raise exception, self.validate() will do this later
if logging_config:
self._logging_config = dict(logging_config)
if webassets_env is not None:
self._webassets_env = webassets_env
self.validate() # Checks that all attributes are pre-populated
# Convert relative paths to absolute where needed
# self.validate() will fail if there's no app_config['controllers']
for _ in self._app_config['controllers']:
section = self._app_config['controllers'][_]
for r in section:
if isinstance(section[r], dict):
for __ in ['tools.staticdir.root',
'tools.staticfile.root']:
pth = section[r].get(__)
if pth is not None and not pth.startswith('/'):
self._app_config['controllers'][_][r][__] = \
os.path.join(CWD, pth)
# Convert relative paths of logs in handlers
# self.validate() will fail if there's no self._logging_config
for handler_name, handler_config in (getattr(self, '_logging_config', {}) or {}).get('handlers', {}).viewitems():
pth = handler_config.get('filename')
if pth is not None and not pth.startswith('/'):
self._logging_config['handlers'][handler_name]['filename'] = \
os.path.join(CWD, pth)
if environment == "backlash":
self.setup_backlash_environment()
@property
def config_file_paths(self):
if self._config_file_paths:
sorted_kv_pairs = tuple(((k, self._config_file_paths[k])
for k in sorted(self._config_file_paths.viewkeys())))
paths = collections.namedtuple("config_file_paths", [e[0] for e in sorted_kv_pairs])
return paths(*[e[1] for e in sorted_kv_pairs])
@property
def project_metadata(self):
return self.app_config["project_metadata"]
@property
def use_logging(self):
return self.app_config.get("global", {}).get("engine.logging.on", False)
@property
def use_redis(self):
if self.controllers_config:
for _, controller_config in self.controllers_config.viewitems():
controller_config = controller_config.copy()
controller_config.pop("controller")
for path_config in controller_config.viewvalues():
if path_config.get("tools.sessions.storage_type") == "redis":
return True
return False
@property
def use_sqlalchemy(self):
return self.app_config.get("global", {}).get("engine.sqlalchemy.on", False)
@property
def use_jinja2(self):
return "jinja2" in self.app_config
@property
def use_webassets(self):
return self.use_jinja2 and self.app_config["jinja2"].get("use_webassets", False)
@property
def use_email(self):
return "email" in self.app_config
@property
def controllers_config(self):
return self.app_config.get("controllers")
@property
def app_config(self):
return self._app_config
@property
def logging_config(self):
return getattr(self, "_logging_config", None)
@property
def webassets_env(self):
return getattr(self, "_webassets_env", None)
@property
def jinja2_config(self):
if self.use_jinja2:
conf = self.app_config["jinja2"].copy()
conf.pop("use_webassets", None)
return conf
@property
def sqlalchemy_config(self):
if self.use_sqlalchemy:
if "sqlalchemy_engine" in self.app_config:
saconf = self.app_config["sqlalchemy_engine"].copy()
return {"sqlalchemy_engine": saconf}
else:
return dict([(k, v) for k, v in self.app_config.viewitems()
if k.startswith("sqlalchemy_engine")])
@property
def email_config(self):
return self.app_config.get("email")
def setup_backlash_environment(self):
"""
Configures this configuration object to run under the backlash
debugger environment and registers that environment with
cherrypy's config object.
"""
try:
from backlash import DebuggedApplication
except ImportError:
warnings.warn("backlash not installed")
return
cherrypy._cpconfig.environments["backlash"] = {
"log.wsgi": True,
"request.throw_errors": True,
"log.screen": False,
"engine.autoreload_on": False
}
def remove_error_options(section):
section.pop("request.handler_error", None)
section.pop("request.error_response", None)
section.pop("tools.err_redirect.on", None)
section.pop("tools.log_headers.on", None)
section.pop("tools.log_tracebacks.on", None)
for k in section.copy().viewkeys():
if k.startswith("error_page.") or \
k.startswith("request.error_page."):
section.pop(k)
for section_name, section in self.app_config.viewitems():
if section_name.startswith("/") or section_name == "global":
remove_error_options(section)
wsgi_pipeline = []
if "/" in self.app_config:
wsgi_pipeline = self.app_config["/"].get("wsgi.pipeline", [])
else:
self.app_config["/"] = {}
wsgi_pipeline.insert(0, ("backlash", DebuggedApplication))
self.app_config["/"]["wsgi.pipeline"] = wsgi_pipeline
def validate(self):
# no need to check for cp config, which will be checked on startup
if not hasattr(self, "_app_config") or not self.app_config:
raise BlueberryPyNotConfiguredError("BlueberryPy application configuration not found.")
if self.use_sqlalchemy and not self.sqlalchemy_config:
raise BlueberryPyNotConfiguredError("SQLAlchemy configuration not found.")
if self.use_webassets:
if self.webassets_env is None:
raise BlueberryPyNotConfiguredError("Webassets configuration not found.")
elif len(self.webassets_env) == 0:
raise BlueberryPyNotConfiguredError("No bundles found in webassets env.")
if self.use_jinja2 and not self.jinja2_config:
raise BlueberryPyNotConfiguredError("Jinja2 configuration not found.")
if self.use_logging and not self.logging_config:
warnings.warn("BlueberryPy application-specific logging "
"configuration not found. Continuing without "
"BlueberryPy's logging plugin.")
if self.use_email:
if not self.email_config:
warnings.warn("BlueberryPy email configuration is empty.")
else:
try:
signature = inspect.signature(Mailer.__init__)
argnames = frozenset(list(signature.parameters)[1:])
except AttributeError:
mailer_ctor_argspec = inspect.getargspec(Mailer.__init__)
argnames = frozenset(mailer_ctor_argspec.args[1:])
for key in self.email_config.viewkeys():
if key not in argnames:
closest_match = difflib.get_close_matches(key, argnames, 1)
closest_match = ((closest_match and " Did you mean %r?" % closest_match[0])
or "")
warnings.warn(("Unknown key %r found for [email]." % key) + closest_match)
if not self.controllers_config:
raise BlueberryPyConfigurationError("You must declare at least one controller.")
else:
for script_name, section in self.controllers_config.viewitems():
controller = section.get("controller")
if controller is None:
raise BlueberryPyConfigurationError("You must define a controller in the "
"[controllers][%s] section." % script_name)
elif isinstance(controller, cherrypy.dispatch.RoutesDispatcher):
if not controller.controllers:
warnings.warn("Controller %r has no connected routes." % script_name)
else:
for member_name, member_obj in inspect.getmembers(controller):
if member_name == "exposed" and member_obj:
break
elif (hasattr(member_obj, "exposed") and
member_obj.exposed is True):
break
else:
warnings.warn("Controller %r has no exposed method." % script_name)
@classmethod
def _load_env_var(cls, env_var_name):
env_conf = {}
try:
env_conf = json.loads(os.getenv(env_var_name),
object_hook=cls._callable_json_loader)
except ValueError:
# Don't use simplejson.JSONDecodeError, since it only exists in
# simplejson implementation and is a subclass of ValueError
# See: https://github.com/Yelp/mrjob/issues/544
logger.error('${} is not a valid JSON string!'
.format(env_var_name))
except TypeError:
logger.warning('${} environment variable is not set!'
.format(env_var_name))
except:
logger.exception('Could not parse ${} environment variable for an '
'unknown reason!'.format(env_var_name))
return env_conf
@staticmethod
def get_callable_from_str(s):
python_module, python_name = s.rsplit('.', 1)
return getattr(importlib.import_module(python_module), python_name)
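# e.g. get_callable_from_str('os.path.join') imports os.path and returns
# the join function.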
@classmethod
def _callable_json_loader(cls, obj):
if isinstance(obj, str):
if obj.startswith('!!python/name:'):
cllbl = cls.get_callable_from_str(obj.split(':', 1)[-1])
return cllbl if callable(cllbl) else obj
if isinstance(obj, dict):
keys = tuple(filter(lambda _: _.startswith('!!python/object:'),
obj.keys()))
for k in keys:
cllbl = cls.get_callable_from_str(k.split(':', 1)[-1])
return cllbl(**obj[k]) if callable(cllbl) else obj
return obj
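# Illustrative sketch (hypothetical names): a JSON value such as
#   {"point": {"!!python/object:myapp.Point": {"x": 1, "y": 2}}}
# decodes with the inner dict replaced by myapp.Point(x=1, y=2), since
# json.loads passes every decoded object through _callable_json_loader.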
@classmethod
def merge_dicts(cls, base, overrides):
'''Recursive helper for merging of two dicts'''
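# Illustrative sketch (not part of the original source):
#   merge_dicts({'a': {'x': 1}, 'b': [1, 2]}, {'a': {'y': 2}, 'b': 3})
#   -> {'a': {'x': 1, 'y': 2}, 'b': [1, 2, 3]}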
for k in overrides.keys():
if k in base:
if isinstance(base[k], dict) and isinstance(overrides[k], dict):
base[k] = cls.merge_dicts(base[k], overrides[k])
elif isinstance(overrides[k], list) and \
not isinstance(base[k], list):
base[k] = [base[k]] + overrides[k]
elif isinstance(base[k], list) and \
not isinstance(overrides[k], list):
base[k] = base[k] + [overrides[k]]
elif not isinstance(base[k], dict):
base[k] = overrides[k]
else:
base[k].update(overrides[k])
else:
base[k] = overrides[k]
return base
|
open-craft-guild/blueberrypy
|
src/blueberrypy/config.py
|
Python
|
bsd-3-clause
| 18,173
|
#!/usr/bin/env python
# Remove .egg-info directory if it exists, to avoid dependency problems with
# partially-installed packages (20160119/dphiffer)
import os
import sys
import shutil
setup = os.path.abspath(sys.argv[0])
parent = os.path.dirname(setup)
pkg = os.path.basename(parent)
if pkg.startswith("py-mapzen"):
pkg = pkg.replace("py-", "")
pkg = pkg.replace("-", ".")
egg_info = "%s.egg-info" % pkg
egg_info = os.path.join(parent, egg_info)
if os.path.exists(egg_info):
shutil.rmtree(egg_info)
from setuptools import setup, find_packages
packages = find_packages()
desc = open("README.md").read()
version = open("VERSION").read()
setup(
name='mapzen.whosonfirst.mapshaper.utils',
namespace_packages=['mapzen', 'mapzen.whosonfirst', 'mapzen.whosonfirst.mapshaper', 'mapzen.whosonfirst.mapshaper.utils'],
version=version,
description='Python utility methods for working with Who\'s On First documents and Mapshaper',
author='Mapzen',
url='https://github.com/whosonfirst/py-mapzen-whosonfirst-mapshaper-utils',
install_requires=[
'mapzen.whosonfirst.mapshaper>=0.05',
],
dependency_links=[
'https://github.com/whosonfirst/py-mapzen-whosonfirst-mapshaper/tarball/master#egg=mapzen.whosonfirst.mapshaper-0.05',
],
packages=packages,
scripts=[
],
download_url='https://github.com/whosonfirst/py-mapzen-whosonfirst-mapshaper-utils/releases/tag/' + version,
license='BSD')
|
whosonfirst/py-mapzen-whosonfirst-mapshaper-utils
|
setup.py
|
Python
|
bsd-3-clause
| 1,496
|
from productos.models import Categoria, Imagen
from django.contrib import admin
from imagekit.admin import AdminThumbnail
# Register your models here.
class ImagenAdmin(admin.ModelAdmin):
imagen = AdminThumbnail(image_field='imagen_miniatura')
list_display = ('nombre', 'categoria','imagen')
admin.site.register(Categoria)
admin.site.register(Imagen,ImagenAdmin)
|
gabrielf10/Soles-pythonanywhere
|
productos/admin.py
|
Python
|
bsd-3-clause
| 378
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_core.util import qiita_test_checker
from qiita_core.qiita_settings import qiita_config
import qiita_db as qdb
@qiita_test_checker()
class QiitaBaseTest(TestCase):
"""Tests that the base class functions act correctly"""
def setUp(self):
# We need an actual subclass in order to test the equality functions
self.tester = qdb.artifact.Artifact(1)
self.portal = qiita_config.portal
def tearDown(self):
qiita_config.portal = self.portal
def test_init_base_error(self):
"""Raises an error when instantiating a base class directly"""
with self.assertRaises(IncompetentQiitaDeveloperError):
qdb.base.QiitaObject(1)
def test_init_error_inexistent(self):
"""Raises an error when instantiating an object that does not exists"""
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
qdb.artifact.Artifact(10)
def test_check_subclass(self):
"""Nothing happens if check_subclass called from a subclass"""
self.tester._check_subclass()
def test_check_subclass_error(self):
"""check_subclass raises an error if called from a base class"""
# Checked through the __init__ call
with self.assertRaises(IncompetentQiitaDeveloperError):
qdb.base.QiitaObject(1)
with self.assertRaises(IncompetentQiitaDeveloperError):
qdb.base.QiitaStatusObject(1)
def test_check_id(self):
"""Correctly checks if an id exists on the database"""
self.assertTrue(self.tester._check_id(1))
self.assertFalse(self.tester._check_id(100))
def test_check_portal(self):
"""Correctly checks if object is accessable in portal given"""
qiita_config.portal = 'QIITA'
tester = qdb.analysis.Analysis(1)
self.assertTrue(tester._check_portal(1))
qiita_config.portal = 'EMP'
self.assertFalse(tester._check_portal(1))
self.assertTrue(self.tester._check_portal(1))
def test_equal_self(self):
"""Equality works with the same object"""
self.assertEqual(self.tester, self.tester)
def test_equal(self):
"""Equality works with two objects pointing to the same instance"""
new = qdb.artifact.Artifact(1)
self.assertEqual(self.tester, new)
def test_not_equal(self):
"""Not equals works with object of the same type"""
sp1 = qdb.study.StudyPerson(1)
sp2 = qdb.study.StudyPerson(2)
self.assertNotEqual(sp1, sp2)
def test_not_equal_type(self):
"""Not equals works with object of different type"""
new = qdb.study.Study(1)
self.assertNotEqual(self.tester, new)
@qiita_test_checker()
class QiitaStatusObjectTest(TestCase):
"""Tests that the QittaStatusObject class functions act correctly"""
def setUp(self):
# We need an actual subclass in order to test the equality functions
self.tester = qdb.analysis.Analysis(1)
def test_status(self):
"""Correctly returns the status of the object"""
self.assertEqual(self.tester.status, "in_construction")
def test_check_status_single(self):
"""check_status works passing a single status"""
self.assertTrue(self.tester.check_status(["in_construction"]))
self.assertFalse(self.tester.check_status(["queued"]))
def test_check_status_exclude_single(self):
"""check_status works passing a single status and the exclude flag"""
self.assertTrue(self.tester.check_status(["public"], exclude=True))
self.assertFalse(self.tester.check_status(["in_construction"],
exclude=True))
def test_check_status_list(self):
"""check_status work passing a list of status"""
self.assertTrue(self.tester.check_status(
["in_construction", "queued"]))
self.assertFalse(self.tester.check_status(
["public", "queued"]))
def test_check_status_exclude_list(self):
"""check_status work passing a list of status and the exclude flag"""
self.assertTrue(self.tester.check_status(
["public", "queued"], exclude=True))
self.assertFalse(self.tester.check_status(
["in_construction", "queued"], exclude=True))
def test_check_status_unknown_status(self):
"""check_status raises an error if an invalid status is provided"""
with self.assertRaises(ValueError):
self.tester.check_status(["foo"])
with self.assertRaises(ValueError):
self.tester.check_status(["foo"], exclude=True)
def test_check_status_unknown_status_list(self):
"""check_status raises an error if an invalid status list is provided
"""
with self.assertRaises(ValueError):
self.tester.check_status(["foo", "bar"])
with self.assertRaises(ValueError):
self.tester.check_status(["foo", "bar"], exclude=True)
if __name__ == '__main__':
main()
|
squirrelo/qiita
|
qiita_db/test/test_base.py
|
Python
|
bsd-3-clause
| 5,463
|
import datetime
from mock import patch
from pretend import stub
from gurtel import session
def test_annotates_request():
"""Annotates request with ``session`` property."""
request = stub(
cookies={},
app=stub(secret_key='secret', is_ssl=True, config={}),
)
session.session_middleware(request, lambda req: None)
assert request.session.secret_key == 'secret'
@patch.object(session.JSONSecureCookie, 'save_cookie')
def test_sets_cookie_on_response(mock_save_cookie):
"""Calls ``save_cookie`` on response."""
request = stub(
cookies={},
app=stub(secret_key='secret', is_ssl=True, config={}),
)
response = stub()
session.session_middleware(request, lambda req: response)
mock_save_cookie.assert_called_once_with(
response, httponly=True, secure=True)
@patch.object(session.JSONSecureCookie, 'save_cookie')
@patch.object(session.timezone, 'now')
def test_can_set_expiry(mock_now, mock_save_cookie):
"""Calls ``save_cookie`` on response with expiry date, if configured."""
request = stub(
cookies={},
app=stub(
secret_key='secret',
is_ssl=True,
config={'session.expiry_minutes': '1440'},
),
)
response = stub()
mock_now.return_value = datetime.datetime(2013, 11, 22)
session.session_middleware(request, lambda req: response)
mock_save_cookie.assert_called_once_with(
response,
httponly=True,
secure=True,
expires=datetime.datetime(2013, 11, 23),
)
|
oddbird/gurtel
|
tests/test_session.py
|
Python
|
bsd-3-clause
| 1,572
|
# -*- coding: utf-8 -*-
"""
Django settings for LittleSportsBiscuit project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('LittleSportsBiscuit')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'LittleSportsBiscuit.users', # custom users app
'LittleSportsBiscuit',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'LittleSportsBiscuit.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Kevin A. Miller""", 'kevin@maninmotion.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///LittleSportsBiscuit"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
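# Illustrative value (sketch): DATABASE_URL=postgres://user:pass@localhost:5432/lsb
# is parsed by django-environ's env.db() into the usual ENGINE/NAME/USER/
# PASSWORD/HOST/PORT settings dict.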
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'EST'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
|
maninmotion/LittleSportsBiscuit
|
config/settings/common.py
|
Python
|
bsd-3-clause
| 9,768
|
"""ZFS based backup workflows."""
import datetime
import shlex
import gflags
import lvm
import workflow
FLAGS = gflags.FLAGS
gflags.DEFINE_string('rsync_options',
'--archive --acls --numeric-ids --delete --inplace',
'rsync command options')
gflags.DEFINE_string('rsync_path', '/usr/bin/rsync', 'path to rsync binary')
gflags.DEFINE_string('zfs_snapshot_prefix', 'ari-backup-',
'prefix for historical ZFS snapshots')
gflags.DEFINE_string('zfs_snapshot_timestamp_format', '%Y-%m-%d--%H%M',
'strftime() formatted timestamp used when naming new ZFS snapshots')
class ZFSLVMBackup(lvm.LVMSourceMixIn, workflow.BaseWorkflow):
"""Workflow for backing up a logical volume to a ZFS dataset.
Data is copied from an LVM snapshot to a ZFS dataset using rsync and then
ZFS commands are issued to create historical snapshots. The ZFS snapshot
lifecycle is also managed by this class. When a backup completes, snapshots
older than snapshot_expiration_days are destroyed.
This approach has some benefits over rdiff-backup in that all backup
datapoints are easily browseable and replication of the backup data using
ZFS streams is generally less resource intensive than using something like
rsync to mirror the files created by rdiff-backup.
One downside is that it's easier to store all file metadata using
rdiff-backup. Rsync can only store metadata for files that the destination
file system can also store. For example, if extended file system
attributes are used on the source file system, but aren't available on the
destination, rdiff-backup will still record those attributes in its own
files. If faced with that same scenario, rsync would lose those attributes.
Furthermore, rsync must have root privilege to write arbitrary file
metadata.
New post-job hooks are added for creating ZFS snapshots and trimming old
ones.
"""
def __init__(self, label, source_hostname, rsync_dst, zfs_hostname,
dataset_name, snapshot_expiration_days, **kwargs):
"""Configure a ZFSLVMBackup object.
Args:
label: str, label for the backup job (e.g. database-server1).
source_hostname: str, the name of the host with the source data to
backup.
rsync_dst: str, the destination argument for the rsync command line
(e.g. backupbox:/backup-store/database-server1).
zfs_hostname: str, the name of the backup destination host where we will
be managing the ZFS snapshots.
dataset_name: str, the full ZFS path (not file system path) to the
dataset holding the backups for this job
(e.g. tank/backup-store/database-server1).
snapshot_expiration_days: int, the maximum age of a ZFS snapshot in days.
Pro tip: It's a good practice to reuse the label argument as the last
path component in the rsync_dst and dataset_name arguments.
"""
# Call our super class's constructor to enable LVM snapshot management
super(ZFSLVMBackup, self).__init__(label, **kwargs)
# Assign instance vars specific to this class.
self.source_hostname = source_hostname
self.rsync_dst = rsync_dst
self.zfs_hostname = zfs_hostname
self.dataset_name = dataset_name
# Assign flags to instance vars so they might be easily overridden in
# workflow configs.
self.rsync_options = FLAGS.rsync_options
self.rsync_path = FLAGS.rsync_path
self.zfs_snapshot_prefix = FLAGS.zfs_snapshot_prefix
self.zfs_snapshot_timestamp_format = FLAGS.zfs_snapshot_timestamp_format
self.add_post_hook(self._create_zfs_snapshot)
self.add_post_hook(self._destroy_expired_zfs_snapshots,
{'days': snapshot_expiration_days})
def _get_current_datetime(self):
"""Returns datetime object with the current date and time.
This method is mostly useful for testing purposes.
"""
return datetime.datetime.now()
def _run_custom_workflow(self):
"""Run rsync backup of LVM snapshot to ZFS dataset."""
# TODO(jpwoodbu) Consider throwing an exception if we see things in the
# include or exclude lists since we don't use them in this class.
self.logger.debug('ZFSLVMBackup._run_custom_workflow started.')
# Since we're dealing with ZFS datasets, let's always exclude the .zfs
# directory in our rsync options.
rsync_options = shlex.split(self.rsync_options) + ['--exclude', '/.zfs']
# We add a trailing slash to the src path otherwise rsync will make a
# subdirectory at the destination, even if the destination is already a
# directory.
rsync_src = self._snapshot_mount_point_base_path + '/'
command = [self.rsync_path] + rsync_options + [rsync_src, self.rsync_dst]
self.run_command(command, self.source_hostname)
self.logger.debug('ZFSLVMBackup._run_custom_workflow completed.')
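# Illustrative final command with the default flags (paths invented):
#   /usr/bin/rsync --archive --acls --numeric-ids --delete --inplace \
#       --exclude /.zfs /mnt/snapshots/db1/ backupbox:/backup-store/db1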
def _create_zfs_snapshot(self, error_case):
"""Creates a new ZFS snapshot of our destination dataset.
The name of the snapshot will include the zfs_snapshot_prefix provided by
FLAGS and a timestamp. The zfs_snapshot_prefix is used by
_find_snapshots_older_than() when deciding which snapshots to
destroy. The timestamp encoded in a snapshot name is only for end-user
convenience. The creation metadata on the ZFS snapshot is what is used to
determine a snapshot's age.
This method does nothing if error_case is True.
Args:
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Creating ZFS snapshot...')
timestamp = self._get_current_datetime().strftime(
self.zfs_snapshot_timestamp_format)
snapshot_name = self.zfs_snapshot_prefix + timestamp
snapshot_path = '{dataset_name}@{snapshot_name}'.format(
dataset_name=self.dataset_name, snapshot_name=snapshot_name)
command = ['zfs', 'snapshot', snapshot_path]
self.run_command(command, self.zfs_hostname)
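# e.g. with the default flags and dataset 'tank/backup-store/db1', the
# snapshot path looks like 'tank/backup-store/db1@ari-backup-2013-11-22--0930'
# (illustrative values).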
def _find_snapshots_older_than(self, days):
"""Returns snapshots older than the given number of days.
Only snapshots that meet the following criteria are returned:
1. They were created at least "days" ago.
2. Their name is prefixed with FLAGS.zfs_snapshot_prefix.
Args:
days: int, the minimum age of the snapshots in days.
Returns:
A list of filtered snapshots.
"""
expiration = self._get_current_datetime() - datetime.timedelta(days=days)
# Let's find all the snapshots for this dataset.
command = ['zfs', 'get', '-rH', '-o', 'name,value', 'type',
self.dataset_name]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
snapshots = list()
# Sometimes we get extra lines which are empty, so we'll strip the lines.
for line in stdout.strip().splitlines():
name, dataset_type = line.split('\t')
if dataset_type == 'snapshot':
# Let's try to only consider destroying snapshots made by us ;)
if name.split('@')[1].startswith(self.zfs_snapshot_prefix):
snapshots.append(name)
expired_snapshots = list()
for snapshot in snapshots:
creation_time = self._get_snapshot_creation_time(snapshot)
if creation_time <= expiration:
expired_snapshots.append(snapshot)
return expired_snapshots
def _get_snapshot_creation_time(self, snapshot):
"""Gets the creation time of a snapshot as a Python datetime object
Args:
snapshot: str, the fule ZFS path to the snapshot.
Returns:
A datetime object representing the creation time of the snapshot.
"""
command = ['zfs', 'get', '-H', '-o', 'value', 'creation', snapshot]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
return datetime.datetime.strptime(stdout.strip(), '%a %b %d %H:%M %Y')
def _destroy_expired_zfs_snapshots(self, days, error_case):
"""Destroy snapshots older than the given numnber of days.
Any snapshots in the target dataset with a name that starts with
FLAGS.zfs_snapshot_prefix and a creation date older than days will be
destroyed. Depending on the size of the snapshots and the performance of
the disk subsystem, this operation could take a while.
This method does nothing if error_case is True.
Args:
days: int, the max age of a snapshot in days.
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Looking for expired ZFS snapshots...')
snapshots = self._find_snapshots_older_than(days)
# Sentinel value used to log if we destroyed no snapshots.
snapshots_destroyed = False
# Destroy expired snapshots.
for snapshot in snapshots:
command = ['zfs', 'destroy', snapshot]
self.run_command(command, self.zfs_hostname)
snapshots_destroyed = True
self.logger.info('{snapshot} destroyed.'.format(snapshot=snapshot))
if not snapshots_destroyed:
self.logger.info('Found no expired ZFS snapshots.')
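if __name__ == '__main__':
  # Minimal, self-contained sketch of the expiration check used above. The
  # creation string mimics the output of
  # `zfs get -H -o value creation <snapshot>`; the value and the 30 day
  # cut-off are made up for illustration.
  sample_creation = 'Thu Jun 13 13:14 2013'
  creation_time = datetime.datetime.strptime(sample_creation,
                                             '%a %b %d %H:%M %Y')
  expiration = datetime.datetime.now() - datetime.timedelta(days=30)
  print('expired: %s' % (creation_time <= expiration))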
|
rbarlow/ari-backup
|
ari_backup/zfs.py
|
Python
|
bsd-3-clause
| 9,110
|
from django.contrib import admin
from workflow.models import State, StateLog, NextState, Project, Location
from workflow.activities import StateActivity
class NextStateInline(admin.StackedInline):
model = NextState
fk_name = 'current_state'
extra = 0
class StateAdmin(admin.ModelAdmin):
inlines = [NextStateInline, ]
list_display = ('name', 'is_work_state',)
class StateLogAdmin(admin.ModelAdmin):
readonly_fields = ['start', 'end', 'state', 'user']
list_display = ('user', 'state', 'project', 'location', 'start', 'end',)
admin.site.register(State, StateAdmin)
admin.site.register(StateLog, StateLogAdmin)
admin.site.register(Project)
admin.site.register(Location)
|
django-stars/dash2011
|
presence/apps/workflow/admin.py
|
Python
|
bsd-3-clause
| 703
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################
A class used for computing different types of drug descriptors!
You can freely use and distribute it. If you have any problems,
please contact us promptly.
Authors: Dongsheng Cao and Yizeng Liang.
Date: 2012.09.24
Email: oriental-cds@163.com
##############################################################################
"""
# Core Library modules
import string
# Third party modules
from rdkit import Chem
# First party modules
from PyBioMed.PyGetMol import Getmol as getmol
from PyBioMed.PyMolecule import (
AtomTypes,
basak,
bcut,
cats2d,
charge,
connectivity,
constitution,
estate,
fingerprint,
geary,
ghosecrippen,
kappa,
moe,
molproperty,
moran,
moreaubroto,
topology,
)
Version = 1.0
FingerprintName = [
"FP2",
"FP3",
"FP4",
"topological",
"Estate",
"atompairs",
"torsions",
"morgan",
"ECFP2",
"ECFP4",
"ECFP6",
"MACCS",
"FCFP2",
"FCFP4",
"FCFP6",
"Pharm2D2point",
"Pharm2D3point",
"GhoseCrippen",
"PubChem",
]
##############################################################################
class PyMolecule:
"""
#################################################################
    A PyMolecule class used for computing drug descriptors.
#################################################################
"""
def __init__(self):
"""
#################################################################
constructor of PyMolecule.
#################################################################
"""
pass
def ReadMolFromMOL(self, filename=""):
"""
#################################################################
        Read a molecule from a MOL (SDF) file.
Usage:
        res=ReadMolFromMOL(filename)
Input: filename is a file name.
Output: res is a molecule object.
#################################################################
"""
        self.mol = Chem.MolFromMolFile(filename)
return self.mol
def ReadMolFromSmile(self, smi=""):
"""
#################################################################
Read a molecule by SMILES string.
Usage:
res=ReadMolFromSmile(smi)
Input: smi is a SMILES string.
Output: res is a molecule object.
#################################################################
"""
self.mol = Chem.MolFromSmiles(smi.strip())
return self.mol
def ReadMolFromInchi(self, inchi=""):
"""
#################################################################
        Read a molecule by InChI string.
        Usage:
        res=ReadMolFromInchi(inchi)
        Input: inchi is an InChI string.
Output: res is a molecule object.
#################################################################
"""
from openbabel import pybel
temp = pybel.readstring("inchi", inchi)
smi = temp.write("smi")
self.mol = Chem.MolFromSmiles(smi.strip())
return self.mol
def ReadMolFromMol(self, filename=""):
"""
#################################################################
Read a molecule with mol file format.
Usage:
res=ReadMolFromMol(filename)
Input: filename is a file name.
Output: res is a molecule object.
#################################################################
"""
self.mol = Chem.MolFromMolFile(filename)
return self.mol
def GetMolFromNCBI(self, ID=""):
"""
#################################################################
Get a molecule by NCBI id (e.g., 2244).
Usage:
res=GetMolFromNCBI(ID)
Input: ID is a compound ID (CID) in NCBI.
Output: res is a SMILES string.
#################################################################
"""
res = getmol.GetMolFromNCBI(cid=ID)
return res
def GetMolFromEBI(self, ID=""):
"""
#################################################################
Get a molecule by EBI id.
Usage:
res=GetMolFromEBI(ID)
Input: ID is a compound identifier in EBI.
Output: res is a SMILES string.
#################################################################
"""
res = getmol.GetMolFromEBI(ID)
return res
def GetMolFromCAS(self, ID=""):
"""
#################################################################
        Get a molecule by CAS id (e.g., 50-29-3).
Usage:
res=GetMolFromCAS(ID)
Input: ID is a CAS identifier.
Output: res is a SMILES string.
#################################################################
"""
res = getmol.GetMolFromCAS(casid=ID)
return res
def GetMolFromKegg(self, ID=""):
"""
#################################################################
Get a molecule by kegg id (e.g., D02176).
Usage:
res=GetMolFromKegg(ID)
Input: ID is a compound identifier in KEGG.
Output: res is a SMILES string.
#################################################################
"""
res = getmol.GetMolFromKegg(kid=ID)
return res
def GetMolFromDrugbank(self, ID=""):
"""
#################################################################
        Get a molecule by drugbank id (e.g., DB00133).
Usage:
res=GetMolFromDrugbank(ID)
Input: ID is a compound identifier in Drugbank.
Output: res is a SMILES string.
#################################################################
"""
res = getmol.GetMolFromDrugbank(dbid=ID)
return res
def GetKappa(self):
"""
#################################################################
Calculate all kappa descriptors (7).
Usage:
res=GetKappa()
res is a dict form.
#################################################################
"""
res = kappa.GetKappa(self.mol)
return res
def GetCharge(self):
"""
#################################################################
Calculate all charge descriptors (25).
Usage:
res=GetCharge()
res is a dict form.
#################################################################
"""
res = charge.GetCharge(self.mol)
return res
def GetConnectivity(self):
"""
#################################################################
        Calculate all connectivity descriptors (44).
Usage:
res=GetConnectivity()
res is a dict form.
#################################################################
"""
res = connectivity.GetConnectivity(self.mol)
return res
def GetConstitution(self):
"""
#################################################################
Calculate all constitutional descriptors (30).
Usage:
res=GetConstitution()
res is a dict form.
#################################################################
"""
res = constitution.GetConstitutional(self.mol)
return res
def GetBasak(self):
"""
#################################################################
        Calculate all Basak's information content descriptors (21).
Usage:
res=GetBasak()
res is a dict form.
#################################################################
"""
res = basak.Getbasak(self.mol)
return res
def GetBurden(self):
"""
#################################################################
Calculate all Burden descriptors (64).
Usage:
res=GetBurden()
res is a dict form.
#################################################################
"""
res = bcut.GetBurden(self.mol)
return res
def GetEstate(self):
"""
#################################################################
Calculate estate descriptors (316).
Usage:
res=GetEstate()
res is a dict form.
#################################################################
"""
res = estate._GetEstate(self.mol)
return res
def GetGeary(self):
"""
#################################################################
Calculate all Geary autocorrelation descriptors (32).
Usage:
res=GetGeary()
res is a dict form.
#################################################################
"""
res = geary.GetGearyAuto(self.mol)
return res
def GetMOE(self):
"""
#################################################################
Calculate all MOE-type descriptors (60).
Usage:
res=GetMOE()
res is a dict form.
#################################################################
"""
res = moe.GetMOE(self.mol)
return res
def GetMolProperty(self):
"""
#################################################################
Calculate all molecular properties (6).
Usage:
res=GetMolProperty()
res is a dict form.
#################################################################
"""
res = molproperty.GetMolecularProperty(self.mol)
return res
def GetMoran(self):
"""
#################################################################
        Calculate all Moran autocorrelation descriptors (32).
Usage:
res=GetMoran()
res is a dict form.
#################################################################
"""
res = moran.GetMoranAuto(self.mol)
return res
def GetMoreauBroto(self):
"""
#################################################################
        Calculate all Moreau-Broto autocorrelation descriptors (32).
Usage:
res=GetMoreauBroto()
res is a dict form.
#################################################################
"""
res = moreaubroto.GetMoreauBrotoAuto(self.mol)
return res
def GetTopology(self):
"""
#################################################################
Calculate all topological descriptors (25).
Usage:
res=GetTopology()
res is a dict form.
#################################################################
"""
res = topology.GetTopology(self.mol)
return res
def GetFingerprint(self, FPName="topological", **kwargs):
"""
#################################################################
Calculate all fingerprint descriptors.
        See the available fingerprint types in FingerprintName.
Usage:
res=GetFingerprint(FPName='topological')
res is a tuple or list or dict.
#################################################################
"""
if FPName in FingerprintName:
temp = fingerprint._FingerprintFuncs[FPName]
res = temp(self.mol, **kwargs)
return res
else:
# res=fingerprint.CalculateDaylightFingerprint(self.mol)
res = "This is not a valid fingerprint name!!"
return res
def GetCATS2D(self):
"""
#################################################################
        The main program for calculating the CATS descriptors.
        CATS: chemically advanced template search
        ----> CATS_DA0 ....
        Usage:
        result=CATS2D(mol,PathLength = 10,scale = 1)
        Input: mol is a molecule object.
        PathLength is the max topological distance between two atoms.
        scale is the normalization method (descriptor scaling method):
        scale = 1 means no normalization; the values of the vector are
        raw counts ("counts").
        scale = 2 means division by the number of non-hydrogen (heavy)
        atoms in the molecule.
        scale = 3 means division of each of the 15 possible PPP pairs by
        the summed occurrences of the two respective PPPs.
        Output: result is a dict with the definitions of each descriptor.
#################################################################
"""
res = cats2d.CATS2D(self.mol, PathLength=10, scale=3)
return res
# def GetGhoseCrippenFingerprint(self, FPName='GhoseCrippenFingerprint'):
# """
# #################################################################
# Ghose-Crippen substructures based on the definitions of
#
# SMARTS from Ghose-Crippen's paper. (110 dimension)
#
# The result is a dict format.
# #################################################################
# """
# res = ghosecrippen.GhoseCrippenFingerprint(self.mol)
#
# return res
#
#
# def GetGhoseCrippen(self, FPName='GetGhoseCrippen'):
# """
# #################################################################
# Ghose-Crippen counts based on the definitions of
#
# SMARTS from Ghose-Crippen's paper. (110 dimension)
#
# The result is a dict format.
# #################################################################
# """
# res = ghosecrippen.GhoseCrippenFingerprint(self.mol, count = True)
#
# return res
def GetAllDescriptor(self):
"""
#################################################################
Calculate all descriptors (608).
Usage:
res=GetAllDescriptor()
res is a dict form.
#################################################################
"""
res = {}
res.update(self.GetKappa())
res.update(self.GetCharge())
res.update(self.GetConnectivity())
res.update(self.GetConstitution())
res.update(self.GetEstate())
res.update(self.GetGeary())
res.update(self.GetMOE())
res.update(self.GetMoran())
res.update(self.GetMoreauBroto())
res.update(self.GetTopology())
res.update(self.GetMolProperty())
res.update(self.GetBasak())
res.update(self.GetBurden())
res.update(self.GetCATS2D())
return res
##############################################################################
if __name__ == "__main__":
drugclass = PyMolecule()
drugclass.ReadMolFromSmile("CCC1(c2ccccc2)C(=O)N(C)C(=N1)O")
print(drugclass.GetCharge())
print(drugclass.GetKappa())
print(len(drugclass.GetKappa()))
print(drugclass.GetTopology())
print(len(drugclass.GetTopology()))
print(drugclass.GetMoreauBroto())
res = drugclass.GetAllDescriptor()
print(len(res))
# print drugclass.GetMolFromDrugbank(ID="DB00133")
# res=drugclass.GetFingerprint(FPName='Estate')
print(res)
print(len(res))
print(drugclass.GetConnectivity())
DrugBankID = "DB01014"
drugclass = PyMolecule()
smi = drugclass.GetMolFromDrugbank(DrugBankID)
drugclass.ReadMolFromSmile(smi)
print(drugclass.GetKappa())
print(drugclass.GetCATS2D())
print(drugclass.GetFingerprint(FPName="Estate"))
# print drugclass.GetGhoseCrippen()
# print drugclass.GetGhoseCrippenFingerprint()
print(len(drugclass.GetBasak()))
print(len(drugclass.GetBurden()))
|
gadsbyfly/PyBioMed
|
PyBioMed/Pymolecule.py
|
Python
|
bsd-3-clause
| 16,673
|
import datetime
import Adafruit_BBIO.GPIO as GPIO
import tornado.ioloop
class Baster(object):
"""
Controller for the baster
"""
def __init__(self, baster_pin):
"""
Initializes the controller for a baster and sets it closed
:param baster_pin: The BBB GPIO to use, e.g. P8_14
:type baster_pin: str
"""
self._baster_pin = baster_pin
self._baste_off_handle = None
self._baste_periodic_handle = None
self._duration = 0
self._frequency = 0
GPIO.setup(self._baster_pin, GPIO.OUT)
GPIO.output(self._baster_pin, GPIO.LOW)
def config(self, frequency, duration):
"""
Configures the baster to baste for duration seconds every
frequency minutes
:param frequency: The frequency, in minutes, to baste
        :type frequency: float
:param duration: The duration, in seconds, to baste
:type duration: float
:raises: ValueError
"""
        if frequency < 0:
            raise ValueError('Baste frequency must be >= 0')
        if duration <= 0:
            raise ValueError('Baste duration must be > 0')
self._duration = duration
self._frequency = frequency
if self._baste_periodic_handle:
self._baste_periodic_handle.stop()
self._baste_periodic_handle = None
if self._baste_off_handle:
tornado.ioloop.IOLoop.instance().remove_timeout(
self._baste_off_handle
)
self._baste_off_handle = None
self._baste_off()
if frequency > 0:
self._baste_periodic_handle = tornado.ioloop.PeriodicCallback(
self._baste,
frequency * 60 * 1000)
self._baste_periodic_handle.start()
self._baste()
def get_settings(self):
"""
Returns the current baste frequency and duration
:returns: Tuple containing the baste frequency and duration
:rtype: Tuple
"""
return (self._frequency, self._duration)
def _baste(self):
"""
Bastes for the defined duration set in config
"""
ioloop = tornado.ioloop.IOLoop.instance()
if self._baste_off_handle:
ioloop.remove_timeout(self._baste_off_handle)
self._baste_off_handle = None
GPIO.output(self._baster_pin, GPIO.HIGH)
self._baste_off_handle = ioloop.add_timeout(
datetime.timedelta(seconds=self._duration),
self._baste_off)
def _baste_off(self):
"""
Turns off the basting
"""
GPIO.output(self._baster_pin, GPIO.LOW)
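if __name__ == '__main__':
    # Hypothetical usage sketch: the pin name and timings are made-up values,
    # and a BeagleBone with Adafruit_BBIO is assumed. This configuration
    # bastes for 2.5 seconds every 30 minutes until the IOLoop is stopped.
    baster = Baster('P8_14')
    baster.config(frequency=30, duration=2.5)
    tornado.ioloop.IOLoop.instance().start()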
|
Caligatio/smokematic
|
smokematic/baster.py
|
Python
|
bsd-3-clause
| 2,697
|
"""
WSGI config for jobboardscraper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobboardscraper.settings")
application = get_wsgi_application()
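# Example of serving this module with a WSGI server (assumes Gunicorn is
# installed):
#
#     gunicorn jobboardscraper.wsgi:application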
|
richardcornish/timgorin
|
jobboardscraper/jobboardscraper/wsgi.py
|
Python
|
bsd-3-clause
| 408
|
# -*- coding: utf-8 -*-
import datetime
import json
import os
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import mkt
from mkt.constants.applications import DEVICE_TYPES
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.reviewers.models import EscalationQueue
from mkt.site.fixtures import fixture
from mkt.site.tests import formset, initial, TestCase, user_factory
from mkt.site.tests.test_utils_ import get_image_path
from mkt.submit.decorators import read_dev_agreement_required
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.submit.models import AppSubmissionChecklist
from mkt.translations.models import Translation
from mkt.users.models import UserNotification, UserProfile
from mkt.users.notifications import app_surveys
from mkt.webapps.models import AddonDeviceType, AddonUser, AppFeatures, Webapp
class TestSubmit(TestCase):
fixtures = fixture('user_999')
def setUp(self):
        self.fi_patcher = mock.patch('mkt.developers.tasks.fetch_icon')
        self.fi_mock = self.fi_patcher.start()
        self.user = self.get_user()
        self.login(self.user.email)
    def tearDown(self):
        self.fi_patcher.stop()
def get_user(self):
return UserProfile.objects.get(email='regular@mozilla.com')
def get_url(self, url):
return reverse('submit.app.%s' % url, args=[self.webapp.app_slug])
def _test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def _test_progress_display(self, completed, current):
"""Test that the correct steps are highlighted."""
r = self.client.get(self.url)
progress = pq(r.content)('#submission-progress')
# Check the completed steps.
completed_found = progress.find('.completed')
for idx, step in enumerate(completed):
li = completed_found.eq(idx)
eq_(li.text(), unicode(mkt.APP_STEPS_TITLE[step]))
# Check that we link back to the Developer Agreement.
terms_link = progress.find('.terms a')
if 'terms' in completed:
eq_(terms_link.attr('href'),
reverse('mkt.developers.docs', args=['policies', 'agreement']))
else:
eq_(terms_link.length, 0)
# Check the current step.
eq_(progress.find('.current').text(),
unicode(mkt.APP_STEPS_TITLE[current]))
class TestProceed(TestSubmit):
def setUp(self):
super(TestProceed, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def test_is_authenticated(self):
# Redirect user to Terms.
r = self.client.get(self.url)
self.assert3xx(r, reverse('submit.app.terms'))
def test_is_anonymous(self):
        # Show the user the Terms page, but with the login prompt.
self.client.logout()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(r.context['proceed'], True)
class TestTerms(TestSubmit):
def setUp(self):
super(TestTerms, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app.terms')
def test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def test_jump_to_step(self):
r = self.client.get(reverse('submit.app'), follow=True)
self.assert3xx(r, self.url)
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#submit-terms')
eq_(doc.length, 1)
eq_(doc.find('input[name=newsletter]').siblings('label').length, 1,
'Missing its <label>!')
def test_progress_display(self):
self._test_progress_display([], 'terms')
@mock.patch('basket.subscribe')
def test_agree(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 0)
assert not subscribe_mock.called
@mock.patch('basket.subscribe')
def test_agree_and_sign_me_up(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement':
datetime.datetime.now(),
'newsletter': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 1)
notes = UserNotification.objects.filter(user=self.user, enabled=True,
notification_id=app_surveys.id)
        eq_(notes.count(), 1, 'Expected to be subscribed to newsletter')
subscribe_mock.assert_called_with(
self.user.email, 'app-dev', lang='en-US',
country='restofworld', format='H',
source_url='http://testserver/developers/submit')
def test_disagree(self):
r = self.client.post(self.url)
eq_(r.status_code, 200)
eq_(self.user.read_dev_agreement, None)
eq_(UserNotification.objects.count(), 0)
def test_read_dev_agreement_required(self):
f = mock.Mock()
f.__name__ = 'function'
request = mock.Mock()
request.user.read_dev_agreement = None
request.get_full_path.return_value = self.url
func = read_dev_agreement_required(f)
res = func(request)
assert not f.called
eq_(res.status_code, 302)
eq_(res['Location'], reverse('submit.app'))
class TestManifest(TestSubmit):
def setUp(self):
super(TestManifest, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
def test_anonymous(self):
r = self.client.get(self.url, follow=True)
eq_(r.context['step'], 'terms')
def test_cannot_skip_prior_step(self):
r = self.client.get(self.url, follow=True)
# And we start back at one...
self.assert3xx(r, reverse('submit.app.terms'))
def test_jump_to_step(self):
# I already read the Terms.
self._step()
# So jump me to the Manifest step.
r = self.client.get(reverse('submit.app'), follow=True)
eq_(r.context['step'], 'manifest')
def test_legacy_redirects(self):
def check():
for before, status in redirects:
r = self.client.get(before, follow=True)
self.assert3xx(r, dest, status)
# I haven't read the dev agreement.
redirects = (
('/developers/submit/', 302),
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
)
dest = '/developers/submit/terms'
check()
# I have read the dev agreement.
self._step()
redirects = (
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
('/developers/submit/manifest', 301),
)
dest = '/developers/submit/'
check()
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#upload-file').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms'], 'manifest')
class UploadAddon(object):
def post(self, expect_errors=False, data=None):
if data is None:
data = {'free_platforms': ['free-desktop']}
data.update(upload=self.upload.pk)
response = self.client.post(self.url, data, follow=True)
eq_(response.status_code, 200)
if not expect_errors:
# Show any unexpected form errors.
if response.context and 'form' in response.context:
eq_(response.context['form'].errors, {})
return response
class BaseWebAppTest(BaseUploadTest, UploadAddon, TestCase):
fixtures = fixture('user_999', 'user_10482')
def setUp(self):
super(BaseWebAppTest, self).setUp()
self.manifest = self.manifest_path('mozball.webapp')
self.manifest_url = 'http://allizom.org/mozball.webapp'
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
self.upload.update(name=self.manifest_url)
self.url = reverse('submit.app')
self.login('regular@mozilla.com')
def post_addon(self, data=None):
eq_(Webapp.objects.count(), 0)
self.post(data=data)
return Webapp.objects.get()
class TestCreateWebApp(BaseWebAppTest):
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_post_app_redirect(self, fi_mock):
r = self.post()
webapp = Webapp.objects.get()
self.assert3xx(r,
reverse('submit.app.details', args=[webapp.app_slug]))
assert fi_mock.delay.called, (
'The fetch_icon task was expected to be called')
def test_no_hint(self):
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url), follow=True)
eq_(r.status_code, 200)
assert 'already submitted' not in r.content, (
'Unexpected helpful error (trap_duplicate)')
assert 'already exists' not in r.content, (
'Unexpected validation error (verify_app_domain)')
def test_no_upload(self):
data = {'free_platforms': ['free-desktop']}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_bad_upload(self, fi_mock):
data = {'free_platforms': ['free-desktop'], 'upload': 'foo'}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
assert not fi_mock.delay.called, (
'The fetch_icon task was not expected to be called')
def test_hint_for_same_manifest(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
assert 'Oops' in data['validation']['messages'][0]['message'], (
'Expected oops')
def test_no_hint_for_same_manifest_different_author(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
# Submit same manifest as different user.
self.login('clouserw@mozilla.com')
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; only one app per domain is '
'allowed.')
def test_app_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.is_packaged, False)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'MozillaBall ょ')
eq_(addon.app_slug, u'mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, u'http://allizom.org/mozball.webapp')
eq_(addon.app_domain, u'http://allizom.org')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.latest_version.developer_name, 'Mozilla Labs')
eq_(addon.latest_version.manifest,
json.loads(open(self.manifest).read()))
def test_manifest_with_any_extension(self):
self.manifest = os.path.join(settings.ROOT, 'mkt', 'developers',
'tests', 'addons', 'mozball.owa')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
ok_(addon.id)
def test_version_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.latest_version.version, '1.0')
def test_file_from_uploaded_manifest(self):
addon = self.post_addon()
files = addon.latest_version.files.all()
eq_(len(files), 1)
eq_(files[0].status, mkt.STATUS_PENDING)
def test_set_platform(self):
app = self.post_addon(
{'free_platforms': ['free-android-tablet', 'free-desktop']})
self.assertSetEqual(app.device_types,
[mkt.DEVICE_TABLET, mkt.DEVICE_DESKTOP])
def test_free(self):
app = self.post_addon({'free_platforms': ['free-firefoxos']})
self.assertSetEqual(app.device_types, [mkt.DEVICE_GAIA])
eq_(app.premium_type, mkt.ADDON_FREE)
def test_premium(self):
app = self.post_addon({'paid_platforms': ['paid-firefoxos']})
self.assertSetEqual(app.device_types, [mkt.DEVICE_GAIA])
eq_(app.premium_type, mkt.ADDON_PREMIUM)
def test_supported_locales(self):
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_short_locale(self):
# This manifest has a locale code of "zh" which is in the
# SHORTER_LANGUAGES setting and should get converted to "zh-CN".
self.manifest = self.manifest_path('short-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
eq_(addon.default_locale, 'zh-CN')
eq_(addon.versions.latest().supported_locales, 'es')
def test_unsupported_detail_locale(self):
# This manifest has a locale code of "en-GB" which is unsupported, so
# we default to "en-US".
self.manifest = self.manifest_path('unsupported-default-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_appfeatures_creation(self):
addon = self.post_addon(data={
'free_platforms': ['free-desktop'],
'has_contacts': 'on'
})
features = addon.latest_version.features
ok_(isinstance(features, AppFeatures))
field_names = [f.name for f in AppFeaturesForm().all_fields()]
for field in field_names:
expected = field == 'has_contacts'
eq_(getattr(features, field), expected)
class TestCreateWebAppFromManifest(BaseWebAppTest):
def setUp(self):
super(TestCreateWebAppFromManifest, self).setUp()
Webapp.objects.create(app_slug='xxx',
app_domain='http://existing-app.com')
def upload_webapp(self, manifest_url, **post_kw):
self.upload.update(name=manifest_url) # Simulate JS upload.
return self.post(**post_kw)
def post_manifest(self, manifest_url):
rs = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=manifest_url))
if 'json' in rs['content-type']:
rs = json.loads(rs.content)
return rs
def test_duplicate_domain(self):
self.create_switch(name='webapps-unique-by-domain')
rs = self.upload_webapp('http://existing-app.com/my.webapp',
expect_errors=True)
eq_(rs.context['form'].errors,
{'upload':
['An app already exists on this domain; only one '
'app per domain is allowed.']})
def test_allow_duplicate_domains(self):
self.upload_webapp('http://existing-app.com/my.webapp') # No errors.
def test_duplicate_domain_from_js(self):
self.create_switch(name='webapps-unique-by-domain')
data = self.post_manifest('http://existing-app.com/my.webapp')
eq_(data['validation']['errors'], 1)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; '
'only one app per domain is allowed.')
def test_allow_duplicate_domains_from_js(self):
rs = self.post_manifest('http://existing-app.com/my.webapp')
eq_(rs.status_code, 302)
class BasePackagedAppTest(BaseUploadTest, UploadAddon, TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(BasePackagedAppTest, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.app.update(is_packaged=True)
self.version = self.app.latest_version
self.file = self.version.all_files[0]
self.file.update(filename='mozball.zip')
self.upload = self.get_upload(
abspath=self.package,
user=UserProfile.objects.get(email='regular@mozilla.com'))
self.upload.update(name='mozball.zip')
self.url = reverse('submit.app')
self.login('regular@mozilla.com')
@property
def package(self):
return self.packaged_app_path('mozball.zip')
def post_addon(self, data=None):
eq_(Webapp.objects.count(), 1)
self.post(data=data)
return Webapp.objects.order_by('-id')[0]
def setup_files(self, filename='mozball.zip'):
# Make sure the source file is there.
# Original packaged file.
if not storage.exists(self.file.file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.file_path)
# Signed packaged file.
if not storage.exists(self.file.signed_file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.signed_file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.signed_file_path)
class TestEscalatePrereleaseWebApp(BasePackagedAppTest):
def setUp(self):
super(TestEscalatePrereleaseWebApp, self).setUp()
user_factory(email=settings.NOBODY_EMAIL_ADDRESS)
def post(self):
super(TestEscalatePrereleaseWebApp, self).post(data={
'free_platforms': ['free-firefoxos'],
'packaged': True,
})
def test_prerelease_permissions_get_escalated(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['moz-attention']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 1)
def test_normal_permissions_dont_get_escalated(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['contacts']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 0)
class TestCreatePackagedApp(BasePackagedAppTest):
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_post_app_redirect(self, _mock):
res = self.post()
webapp = Webapp.objects.order_by('-created')[0]
self.assert3xx(res,
reverse('submit.app.details', args=[webapp.app_slug]))
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
@mock.patch('mkt.submit.forms.verify_app_domain')
def test_app_from_uploaded_package(self, _verify, _mock):
addon = self.post_addon(
data={'packaged': True, 'free_platforms': ['free-firefoxos']})
eq_(addon.latest_version.version, '1.0')
eq_(addon.is_packaged, True)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'Packaged MozillaBall ょ')
eq_(addon.app_slug, u'packaged-mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, None)
eq_(addon.app_domain, 'app://hy.fr')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.latest_version.developer_name, 'Mozilla Labs')
assert _verify.called, (
'`verify_app_domain` should be called for packaged apps with '
'origins.')
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_packaged_app_not_unique(self, _mock):
Webapp.objects.create(is_packaged=True, app_domain='app://hy.fr')
res = self.post(
data={'packaged': True, 'free_platforms': ['free-firefoxos']},
expect_errors=True)
eq_(res.context['form'].errors, {
'upload': ['An app already exists on this domain; only one app '
'per domain is allowed.']})
class TestDetails(TestSubmit):
fixtures = fixture('webapp_337141', 'user_999', 'user_10482')
def setUp(self):
super(TestDetails, self).setUp()
self.webapp = self.get_webapp()
self.webapp.update(status=mkt.STATUS_NULL)
self.url = reverse('submit.app.details', args=[self.webapp.app_slug])
self.cat1 = 'books'
def get_webapp(self):
return Webapp.objects.get(id=337141)
def upload_preview(self, image_file=None):
if not image_file:
image_file = get_image_path('preview.jpg')
return self._upload_image(self.webapp.get_dev_url('upload_preview'),
image_file=image_file)
def upload_icon(self, image_file=None):
if not image_file:
image_file = get_image_path('mozilla-sq.png')
return self._upload_image(self.webapp.get_dev_url('upload_icon'),
image_file=image_file)
def _upload_image(self, url, image_file):
with open(image_file, 'rb') as data:
rp = self.client.post(url, {'upload_image': data})
eq_(rp.status_code, 200)
hash_ = json.loads(rp.content)['upload_hash']
assert hash_, 'No hash: %s' % rp.content
return hash_
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
self.cl = AppSubmissionChecklist.objects.create(
addon=self.webapp,
terms=True, manifest=True)
# Associate app with user.
AddonUser.objects.create(addon=self.webapp, user=self.user)
# Associate device type with app.
self.dtype = DEVICE_TYPES.values()[0]
AddonDeviceType.objects.create(addon=self.webapp,
device_type=self.dtype.id)
self.device_types = [self.dtype]
# Associate category with app.
self.webapp.update(categories=[self.cat1])
def test_anonymous(self):
self._test_anonymous()
def test_resume_later(self):
self._step()
self.webapp.appsubmissionchecklist.update(details=True)
r = self.client.get(reverse('submit.app.resume',
args=[self.webapp.app_slug]))
self.assert3xx(r, self.webapp.get_dev_url('edit'))
def test_not_owner(self):
self._step()
self.login('clouserw@mozilla.com')
eq_(self.client.get(self.url).status_code, 403)
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#submit-details').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest'], 'details')
def new_preview_formset(self, *args, **kw):
ctx = self.client.get(self.url).context
blank = initial(ctx['form_previews'].forms[-1])
blank.update(**kw)
return blank
def preview_formset(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.new_preview_formset()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
def get_dict(self, **kw):
data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'homepage': 'http://www.goodreads.com/user/show/7595895-krupa',
'support_url': 'http://www.goodreads.com/user_challenges/351558',
'support_email': 'krupa+to+the+rescue@goodreads.com',
'categories': [self.cat1],
'flash': '1',
'publish_type': mkt.PUBLISH_IMMEDIATE,
'notes': 'yes'
}
# Add the required screenshot.
data.update(self.preview_formset({
'upload_hash': '<hash>',
'position': 0
}))
data.update(**kw)
# Remove fields without values.
data = dict((k, v) for k, v in data.iteritems() if v is not None)
return data
def check_dict(self, data=None, expected=None):
if data is None:
data = self.get_dict()
addon = self.get_webapp()
# Build a dictionary of expected results.
expected_data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'uses_flash': True,
'publish_type': mkt.PUBLISH_IMMEDIATE,
}
if expected:
expected_data.update(expected)
uses_flash = expected_data.pop('uses_flash')
eq_(addon.latest_version.all_files[0].uses_flash, uses_flash)
self.assertSetEqual(addon.device_types, self.device_types)
for field, expected in expected_data.iteritems():
got = unicode(getattr(addon, field))
expected = unicode(expected)
eq_(got, expected,
'Expected %r for %r. Got %r.' % (expected, field, got))
@mock.patch('mkt.submit.views.record_action')
def test_success(self, record_action):
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, mkt.STATUS_NULL)
assert record_action.called
def test_success_paid(self):
self._step()
self.webapp = self.get_webapp()
self.make_premium(self.webapp)
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, mkt.STATUS_NULL)
eq_(self.webapp.highest_status, mkt.STATUS_PENDING)
def test_success_prefill_device_types_if_empty(self):
"""
The new submission flow asks for device types at step one.
This ensures that existing incomplete apps still have device
compatibility.
"""
self._step()
AddonDeviceType.objects.all().delete()
self.device_types = mkt.DEVICE_TYPES.values()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_success_for_approved(self):
self._step()
data = self.get_dict(publish_type=mkt.PUBLISH_PRIVATE)
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data,
expected={'publish_type': mkt.PUBLISH_PRIVATE})
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_media_types(self):
self._step()
res = self.client.get(self.url)
doc = pq(res.content)
eq_(doc('.screenshot_upload').attr('data-allowed-types'),
'image/jpeg|image/png|video/webm')
eq_(doc('#id_icon_upload').attr('data-allowed-types'),
'image/jpeg|image/png')
def test_screenshot(self):
self._step()
im_hash = self.upload_preview()
data = self.get_dict()
data.update(self.preview_formset({
'upload_hash': im_hash,
'position': 0
}))
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = Webapp.objects.get(pk=self.webapp.pk)
eq_(ad.previews.all().count(), 1)
def test_icon(self):
self._step()
im_hash = self.upload_icon()
data = self.get_dict()
data['icon_upload_hash'] = im_hash
data['icon_type'] = 'image/png'
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = self.get_webapp()
eq_(ad.icon_type, 'image/png')
for size in mkt.CONTENT_ICON_SIZES:
fn = '%s-%s.png' % (ad.id, size)
assert os.path.exists(os.path.join(ad.get_icon_dir(), fn)), (
'Expected %s in %s' % (fn, os.listdir(ad.get_icon_dir())))
def test_screenshot_or_video_required(self):
self._step()
data = self.get_dict()
for k in data:
if k.startswith('files') and k.endswith('upload_hash'):
data[k] = ''
rp = self.client.post(self.url, data)
eq_(rp.context['form_previews'].non_form_errors(),
['You must upload at least one screenshot or video.'])
def test_unsaved_screenshot(self):
self._step()
# If there are form errors we should still pass the previews URIs.
preview_type = 'video/webm'
preview_uri = 'moz-filedata:p00p'
data = self.preview_formset({
'position': 1,
'upload_hash': '<hash_one>',
'unsaved_image_type': preview_type,
'unsaved_image_data': preview_uri
})
r = self.client.post(self.url, data)
eq_(r.status_code, 200)
form = pq(r.content)('form')
eq_(form.find('input[name=files-0-unsaved_image_type]').val(),
preview_type)
eq_(form.find('input[name=files-0-unsaved_image_data]').val(),
preview_uri)
def test_unique_allowed(self):
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, mkt.STATUS_NULL)
def test_slug_invalid(self):
self._step()
# Submit an invalid slug.
d = self.get_dict(app_slug='slug!!! aksl23%%')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(
r, 'form_basic', 'app_slug',
"Enter a valid 'slug' consisting of letters, numbers, underscores "
"or hyphens.")
def test_slug_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(app_slug=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
'This field is required.')
def test_description_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(description=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'description',
'This field is required.')
def test_privacy_policy_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(privacy_policy=None))
self.assertFormError(r, 'form_basic', 'privacy_policy',
'This field is required.')
def test_clashing_locale(self):
self.webapp.default_locale = 'de'
self.webapp.save()
self._step()
self.client.cookies['current_locale'] = 'en-us'
data = self.get_dict(name=None, name_de='Test name',
privacy_policy=None,
**{'privacy_policy_en-us': 'XXX'})
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
def test_homepage_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage=None))
self.assertNoFormErrors(r)
def test_homepage_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage='xxx'))
self.assertFormError(r, 'form_basic', 'homepage', 'Enter a valid URL.')
def test_support_url_optional_if_email_present(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url=None))
self.assertNoFormErrors(r)
def test_support_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url='xxx'))
self.assertFormError(r, 'form_basic', 'support_url',
'Enter a valid URL.')
def test_support_email_optional_if_url_present(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email=None))
self.assertNoFormErrors(r)
def test_support_email_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email='xxx'))
self.assertFormError(r, 'form_basic', 'support_email',
'Enter a valid email address.')
def test_support_need_email_or_url(self):
self._step()
res = self.client.post(self.url, self.get_dict(support_email=None,
support_url=None))
self.assertFormError(
res, 'form_basic', 'support',
'You must provide either a website, an email, or both.')
ok_(pq(res.content)('#support-fields .error #trans-support_url'))
ok_(pq(res.content)('#support-fields .error #trans-support_email'))
        # While the inputs will get the error styles, there is no need for an
        # individual error message on each; the hint on the parent is enough.
eq_(pq(res.content)('#support-fields .error .errorlist').text(), '')
def test_categories_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(categories=[]))
eq_(r.context['form_cats'].errors['categories'],
['This field is required.'])
def test_categories_max(self):
self._step()
eq_(mkt.MAX_CATEGORIES, 2)
cat2 = 'games'
cat3 = 'social'
cats = [self.cat1, cat2, cat3]
r = self.client.post(self.url, self.get_dict(categories=cats))
eq_(r.context['form_cats'].errors['categories'],
['You can have only 2 categories.'])
def _post_cats(self, cats):
self.client.post(self.url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories), sorted(cats))
def test_categories_add(self):
self._step()
cat2 = 'games'
self._post_cats([self.cat1, cat2])
def test_categories_add_and_remove(self):
self._step()
cat2 = 'games'
self._post_cats([cat2])
def test_categories_remove(self):
# Add another category here so it gets added to the initial formset.
cat2 = 'games'
self.webapp.update(categories=[self.cat1, cat2])
self._step()
# `cat2` should get removed.
self._post_cats([self.cat1])
class TestDone(TestSubmit):
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
super(TestDone, self).setUp()
self.webapp = self.get_webapp()
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def _step(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
def test_anonymous(self):
self._test_anonymous()
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest', 'details'],
'next_steps')
def test_done(self):
self._step()
res = self.client.get(self.url)
eq_(res.status_code, 200)
class TestNextSteps(TestCase):
# TODO: Delete this test suite once we deploy IARC.
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
self.user = UserProfile.objects.get(email='regular@mozilla.com')
self.login(self.user.email)
self.webapp = Webapp.objects.get(id=337141)
self.webapp.update(status=mkt.STATUS_PENDING)
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def test_200(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
res = self.client.get(self.url)
eq_(res.status_code, 200)
|
eviljeff/zamboni
|
mkt/submit/tests/test_views.py
|
Python
|
bsd-3-clause
| 38,831
|
import json
from djpcms import test
from djpcms.plugins.text import Text
class Editing(test.TestCase):
def setUp(self):
super(Editing,self).setUp()
p = self.get()['page']
        p.set_template(p.create_template('three-columns',
            '{{ content0 }} {{ content1 }} {{ content2 }}',
            'left,center,right'))
for pr in range(0,5):
p.add_plugin(Text,0)
p.add_plugin(Text,1)
p.add_plugin(Text,2)
def postdata(self):
return {self.sites.settings.HTML_CLASSES.post_view_key:'rearrange'}
def geturl(self, block):
return '{0}{1}/{2}/{3}/'.format(self.sites.settings.CONTENT_INLINE_EDITING['pagecontent'],
block.page.id,
block.block,
block.position)
def _getcontent(self, block, toblock):
'''Do as jQuery does'''
data = self.postdata()
if toblock.position:
if toblock.position <= block.position:
toblockp = self.get_block(toblock.block,toblock.position-1)
else:
toblockp = toblock
data['previous'] = toblockp.htmlid()
else:
data['next'] = toblock.htmlid()
self.assertTrue(self.login())
url = self.geturl(block)
res = self.post(url, data = data, response = True, ajax = True)
return json.loads(res.content)
def get_block(self, blocknum, position):
'''Get a content block from page and perform sanity check'''
p = self.get()['page']
block = p.get_block(blocknum,position)
self.assertEqual(block.block,blocknum)
self.assertEqual(block.position,position)
return block
def testLayout(self):
p = self.get()['page']
self.assertEqual(p.numblocks(),3)
def testRearrangeSame(self):
block = self.get_block(2,3)
content = self._getcontent(block,block)
self.assertEqual(content['header'],'empty')
def testRearrangeSame0(self):
block = self.get_block(1,0)
content = self._getcontent(block,block)
self.assertEqual(content['header'],'empty')
def testRearrange3to1SameBlock(self):
block = self.get_block(2,3)
toblock = self.get_block(2,1)
content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']
ids = dict(((el['selector'],el['value']) for el in data))
        self.assertEqual(ids['#'+block.htmlid()],toblock.htmlid())
        self.assertEqual(ids['#'+toblock.htmlid()],block.htmlid())
def testRearrange3to0SameBlock(self):
block = self.get_block(2,3)
toblock = self.get_block(2,0)
content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']
ids = dict(((el['selector'],el['value']) for el in data))
        self.assertEqual(ids['#'+block.htmlid()],toblock.htmlid())
        self.assertEqual(ids['#'+toblock.htmlid()],block.htmlid())
def testRearrange1to4SameBlock(self):
block = self.get_block(2,1)
toblock = self.get_block(2,4)
content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']
ids = dict(((el['selector'],el['value']) for el in data))
        self.assertEqual(ids['#'+block.htmlid()],toblock.htmlid())
        self.assertEqual(ids['#'+toblock.htmlid()],block.htmlid())
def testRearrangeDifferentBlock(self):
block = self.get_block(2,3)
toblock = self.get_block(0,1)
content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']
|
strogo/djpcms
|
tests/regression/editing/tests.py
|
Python
|
bsd-3-clause
| 4,078
|
default_app_config = 'wagtail_commons.core.apps.WagtailCommonsCoreConfig'
|
bgrace/wagtail-commons
|
wagtail_commons/core/__init__.py
|
Python
|
bsd-3-clause
| 73
|
"""
Filename: robustlq.py
Authors: Chase Coleman, Spencer Lyon, Thomas Sargent, John Stachurski
Solves robust LQ control problems.
"""
from __future__ import division  # Remove for Python 3.x
import numpy as np
from lqcontrol import LQ
from quadsums import var_quadratic_sum
from numpy import dot, log, sqrt, identity, hstack, vstack, trace
from scipy.linalg import solve, inv, det, solve_discrete_lyapunov
class RBLQ:
"""
Provides methods for analysing infinite horizon robust LQ control
problems of the form
min_{u_t} sum_t beta^t {x_t' R x_t + u'_t Q u_t }
subject to
x_{t+1} = A x_t + B u_t + C w_{t+1}
and with model misspecification parameter theta.
"""
def __init__(self, Q, R, A, B, C, beta, theta):
"""
Sets up the robust control problem.
Parameters
==========
Q, R : array_like, dtype = float
The matrices R and Q from the objective function
A, B, C : array_like, dtype = float
The matrices A, B, and C from the state space system
beta, theta : scalar, float
The discount and robustness factors in the robust control problem
We assume that
* R is n x n, symmetric and nonnegative definite
* Q is k x k, symmetric and positive definite
* A is n x n
* B is n x k
* C is n x j
"""
# == Make sure all matrices can be treated as 2D arrays == #
A, B, C, Q, R = map(np.atleast_2d, (A, B, C, Q, R))
self.A, self.B, self.C, self.Q, self.R = A, B, C, Q, R
# == Record dimensions == #
self.k = self.Q.shape[0]
self.n = self.R.shape[0]
self.j = self.C.shape[1]
# == Remaining parameters == #
self.beta, self.theta = beta, theta
def d_operator(self, P):
"""
The D operator, mapping P into
D(P) := P + PC(theta I - C'PC)^{-1} C'P.
Parameters
==========
P : array_like
A self.n x self.n array
"""
C, theta = self.C, self.theta
I = np.identity(self.j)
S1 = dot(P, C)
S2 = dot(C.T, S1)
return P + dot(S1, solve(theta * I - S2, S1.T))
def b_operator(self, P):
"""
The B operator, mapping P into
B(P) := R - beta^2 A'PB (Q + beta B'PB)^{-1} B'PA + beta A'PA
and also returning
F := (Q + beta B'PB)^{-1} beta B'PA
Parameters
==========
P : array_like
            A self.n x self.n array
"""
A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta
S1 = Q + beta * dot(B.T, dot(P, B))
S2 = beta * dot(B.T, dot(P, A))
S3 = beta * dot(A.T, dot(P, A))
F = solve(S1, S2)
new_P = R - dot(S2.T, solve(S1, S2)) + S3
return F, new_P
def robust_rule(self):
"""
This method solves the robust control problem by tricking it into a
stacked LQ problem, as described in chapter 2 of Hansen-Sargent's
text "Robustness." The optimal control with observed state is
u_t = - F x_t
And the value function is -x'Px
Returns
=======
        F : array_like, dtype = float
            The optimal control matrix from above
        P : array_like, dtype = float
            The positive semi-definite matrix defining the value function
K : array_like, dtype = float
the worst-case shock matrix K, where :math:`w_{t+1} = K x_t` is
the worst case shock
"""
# == Simplify names == #
A, B, C, Q, R = self.A, self.B, self.C, self.Q, self.R
beta, theta = self.beta, self.theta
k, j = self.k, self.j
# == Set up LQ version == #
I = identity(j)
Z = np.zeros((k, j))
Ba = hstack([B, C])
Qa = vstack([hstack([Q, Z]), hstack([Z.T, -beta*I*theta])])
lq = LQ(Qa, R, A, Ba, beta=beta)
# == Solve and convert back to robust problem == #
P, f, d = lq.stationary_values()
F = f[:k, :]
K = -f[k:f.shape[0], :]
return F, K, P
def robust_rule_simple(self, P_init=None, max_iter=80, tol=1e-8):
"""
A simple algorithm for computing the robust policy F and the
corresponding value function P, based around straightforward
iteration with the robust Bellman operator. This function is easier
to understand but one or two orders of magnitude slower than
self.robust_rule(). For more information see the docstring of that
method.
"""
# == Simplify names == #
A, B, C, Q, R = self.A, self.B, self.C, self.Q, self.R
beta, theta = self.beta, self.theta
# == Set up loop == #
        P = np.zeros((self.n, self.n)) if P_init is None else P_init
iterate, e = 0, tol + 1
while iterate < max_iter and e > tol:
F, new_P = self.b_operator(self.d_operator(P))
e = np.sqrt(np.sum((new_P - P)**2))
iterate += 1
P = new_P
I = np.identity(self.j)
S1 = P.dot(C)
S2 = C.T.dot(S1)
K = inv(theta * I - S2).dot(S1.T).dot(A - B.dot(F))
return F, K, P
def F_to_K(self, F):
"""
Compute agent 2's best cost-minimizing response K, given F.
Parameters
==========
F : array_like
A self.k x self.n array
Returns
=======
K : array_like, dtype = float
P : array_like, dtype = float
"""
Q2 = self.beta * self.theta
R2 = - self.R - dot(F.T, dot(self.Q, F))
A2 = self.A - dot(self.B, F)
B2 = self.C
lq = LQ(Q2, R2, A2, B2, beta=self.beta)
P, neg_K, d = lq.stationary_values()
return - neg_K, P
def K_to_F(self, K):
"""
Compute agent 1's best value-maximizing response F, given K.
Parameters
==========
K : array_like
A self.j x self.n array
Returns
=======
F : array_like, dtype = float
P : array_like, dtype = float
"""
A1 = self.A + dot(self.C, K)
B1 = self.B
Q1 = self.Q
R1 = self.R - self.beta * self.theta * dot(K.T, K)
lq = LQ(Q1, R1, A1, B1, beta=self.beta)
P, F, d = lq.stationary_values()
return F, P
def compute_deterministic_entropy(self, F, K, x0):
"""
Given K and F, compute the value of deterministic entropy, which is
sum_t beta^t x_t' K'K x_t with x_{t+1} = (A - BF + CK) x_t.
"""
H0 = dot(K.T, K)
C0 = np.zeros((self.n, 1))
A0 = self.A - dot(self.B, F) + dot(self.C, K)
e = var_quadratic_sum(A0, C0, H0, self.beta, x0)
return e
def evaluate_F(self, F):
"""
Given a fixed policy F, with the interpretation u = -F x, this
function computes the matrix P_F and constant d_F associated with
discounted cost J_F(x) = x' P_F x + d_F.
Parameters
==========
F : array_like
A self.k x self.n array
Returns
=======
P_F : array_like, dtype = float
Matrix for discounted cost
d_F : scalar
Constant for discounted cost
K_F : array_like, dtype = float
Worst case policy
O_F : array_like, dtype = float
Matrix for discounted entropy
o_F : scalar
Constant for discounted entropy
"""
# == Simplify names == #
Q, R, A, B, C = self.Q, self.R, self.A, self.B, self.C
beta, theta = self.beta, self.theta
# == Solve for policies and costs using agent 2's problem == #
K_F, neg_P_F = self.F_to_K(F)
P_F = - neg_P_F
I = np.identity(self.j)
H = inv(I - C.T.dot(P_F.dot(C)) / theta)
d_F = log(det(H))
# == Compute O_F and o_F == #
sig = -1.0 / theta
AO = sqrt(beta) * (A - dot(B, F) + dot(C, K_F))
O_F = solve_discrete_lyapunov(AO.T, beta * dot(K_F.T, K_F))
ho = (trace(H - 1) - d_F) / 2.0
tr = trace(dot(O_F, C.dot(H.dot(C.T))))
o_F = (ho + beta * tr) / (1 - beta)
return K_F, P_F, d_F, O_F, o_F
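# --- Usage sketch (not part of the original module; all parameter values
# below are hypothetical and chosen only for illustration) ------------------
if __name__ == '__main__':
    A = np.array([[1.0]])
    B = np.array([[1.0]])
    C = np.array([[0.5]])
    Q = np.array([[1.0]])
    R = np.array([[1.0]])
    # theta must exceed the breakdown point of the robust problem; a large
    # value keeps this sketch well behaved
    rblq = RBLQ(Q, R, A, B, C, beta=0.95, theta=50.0)
    F, K, P = rblq.robust_rule()
    print('robust policy F:\n%s' % F)
    print('worst-case shock matrix K:\n%s' % K)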
| 28ideas/quant-econ | quantecon/robustlq.py | Python | bsd-3-clause | 8,434 |
from omnibus.factories import websocket_connection_factory
def mousemove_connection_factory(auth_class, pubsub):
class GeneratedConnection(websocket_connection_factory(auth_class, pubsub)):
def close_connection(self):
self.pubsub.publish(
'mousemoves', 'disconnect',
sender=self.authenticator.get_identifier()
)
return super(GeneratedConnection, self).close_connection()
return GeneratedConnection
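# --- Usage note (a sketch, not from the original file) ----------------------
# django-omnibus resolves connection factories from settings by dotted path;
# assuming the conventional setting name, this factory would be wired up as:
# OMNIBUS_CONNECTION_FACTORY = 'example_project.connection.mousemove_connection_factory'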
| moccu/django-omnibus | examples/mousemove/example_project/connection.py | Python | bsd-3-clause | 485 |
# -*- coding: utf-8 -*-
from reports.accidents.models import *
| k-vinogradov/noclite | reports/models.py | Python | bsd-3-clause | 66 |
"""
Client for the library API.
"""
class LibraryClient(object):
"""
Library API client.
"""
def __init__(self,axilent_connection):
self.content_resource = axilent_connection.resource_client('axilent.library','content')
self.api = axilent_connection.http_client('axilent.library')
def create_content(self,content_type,project,search_index=True,**field_data):
"""
Creates the content. Returns the new content item key in the format:
<content-type>:<content-key>
"""
response = self.content_resource.post(data={'content_type':content_type,
'project':project,
'search_index':search_index,
'content':field_data})
return response['created_content']
def update_content(self,content_type,project,content_key,search_index=True,reset_workflow=True,**field_data):
"""
Updates existing content.
"""
response = self.content_resource.put(data={'content_type':content_type,
'project':project,
'key':content_key,
'search_index':search_index,
'reset_workflow':reset_workflow,
'content':field_data})
return response['updated_content']
def ping(self,project,content_type):
"""
Tests connection with Axilent.
"""
return self.api.ping(project=project,content_type=content_type)
def index_content(self,project,content_type,content_key):
"""
Forces re-indexing of the specified content item.
"""
response = self.api.indexcontent(content_key=content_key,
project=project,
content_type=content_type)
return response['indexed']
def tag_content(self,project,content_type,content_key,tag,search_index=True):
"""
Tags the specified content item.
"""
response = self.api.tagcontent(project=project,
content_type=content_type,
content_key=content_key,
tag=tag,
search_index=search_index)
return response['tagged_content']
def detag_content(self,project,content_type,content_key,tag,search_index=True):
"""
De-tags the specified content item.
"""
response = self.api.detagcontent(project=project,
content_type=content_type,
content_key=content_key,
tag=tag,
search_index=search_index)
return response['removed_tag']
def archive_content(self,project,content_type,content_key):
"""
Archives the content on Axilent.
"""
response = self.content_resource.delete(params={'content_type':content_type,
'project':project,
'key':content_key})
return response['archived']
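# --- Usage sketch (illustrative only; assumes an already configured Axilent
# connection object, and hypothetical project/content-type names) ------------
def _example_usage(connection):
    client = LibraryClient(connection)
    key = client.create_content('article', 'demo-project',
                                title='Hello', body='First post')
    # create_content returns '<content-type>:<content-key>'
    content_type, content_key = key.split(':', 1)
    client.tag_content('demo-project', content_type, content_key, 'news')
    return client.ping('demo-project', content_type)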
| aericson/Djax | pax/library.py | Python | bsd-3-clause | 3,541 |
from __future__ import print_function, absolute_import, division
import numpy as np
import pandas as pd
from toolz import partition
def loc(df, ind):
return df.loc[ind]
def index_count(x):
# Workaround since Index doesn't implement `.count`
return pd.notnull(x).sum()
def mean_aggregate(s, n):
try:
return s / n
except ZeroDivisionError:
return np.nan
def var_aggregate(x2, x, n, ddof):
try:
result = (x2 / n) - (x / n)**2
if ddof != 0:
result = result * n / (n - ddof)
return result
except ZeroDivisionError:
return np.nan
def describe_aggregate(values):
assert len(values) == 6
count, mean, std, min, q, max = values
typ = pd.DataFrame if isinstance(count, pd.Series) else pd.Series
part1 = typ([count, mean, std, min],
index=['count', 'mean', 'std', 'min'])
q.index = ['25%', '50%', '75%']
part3 = typ([max], index=['max'])
return pd.concat([part1, q, part3])
def cummin_aggregate(x, y):
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.where((x < y) | x.isnull(), y, axis=x.ndim - 1)
else: # scalar
return x if x < y else y
def cummax_aggregate(x, y):
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.where((x > y) | x.isnull(), y, axis=x.ndim - 1)
else: # scalar
return x if x > y else y
def assign(df, *pairs):
kwargs = dict(partition(2, pairs))
return df.assign(**kwargs)
def unique(x, series_name=None):
# unique returns np.ndarray, it must be wrapped
return pd.Series(pd.Series.unique(x), name=series_name)
def value_counts_aggregate(x):
return x.groupby(level=0).sum().sort_values(ascending=False)
def nbytes(x):
return x.nbytes
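# --- Usage sketch (illustrative only; these helpers are normally driven by
# dask.dataframe's task graph rather than called directly) -------------------
if __name__ == '__main__':
    s1 = pd.Series([1, 5, 3])
    s2 = pd.Series([2, 2, 9])
    print(cummin_aggregate(s1, s2))                  # element-wise minimum: 1, 2, 3
    print(mean_aggregate(10.0, 4))                   # 2.5
    print(index_count(pd.Index([1.0, None, 3.0])))   # 2 non-null entries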
| cowlicks/dask | dask/dataframe/methods.py | Python | bsd-3-clause | 1,788 |
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import os
import re
from django import forms
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.admin import helpers
from django.contrib.admin.util import quote, unquote, capfirst
from django.core.exceptions import ValidationError
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db import router, models
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
try:
from django.utils.encoding import force_text
except ImportError:
# Django < 1.5
from django.utils.encoding import force_unicode as force_text
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext, ugettext_lazy
from filer import settings
from filer.admin.forms import (CopyFilesAndFoldersForm, ResizeImagesForm,
RenameFilesForm)
from filer.admin.permissions import PrimitivePermissionAwareModelAdmin
from filer.admin.patched.admin_utils import get_deleted_objects
from filer.admin.tools import (userperms_for_request,
check_folder_edit_permissions,
check_files_edit_permissions,
check_files_read_permissions,
check_folder_read_permissions,
admin_each_context)
from filer.models import (Folder, FolderRoot, UnfiledImages, File, tools,
ImagesWithMissingData, FolderPermission, Image)
from filer.settings import FILER_STATICMEDIA_PREFIX, FILER_PAGINATE_BY
from filer.thumbnail_processors import normalize_subject_location
from filer.utils.compatibility import get_delete_permission
from filer.utils.filer_easy_thumbnails import FilerActionThumbnailer
from filer.views import (popup_status, popup_param, selectfolder_status,
selectfolder_param)
class AddFolderPopupForm(forms.ModelForm):
folder = forms.HiddenInput()
class Meta:
model = Folder
fields = ('name',)
class FolderAdmin(PrimitivePermissionAwareModelAdmin):
list_display = ('name',)
exclude = ('parent',)
list_per_page = 20
list_filter = ('owner',)
search_fields = ['name', 'files__name']
raw_id_fields = ('owner',)
save_as = True # see ImageAdmin
actions = ['move_to_clipboard', 'files_set_public', 'files_set_private',
'delete_files_or_folders', 'move_files_and_folders',
'copy_files_and_folders', 'resize_images', 'rename_files']
directory_listing_template = 'admin/filer/folder/directory_listing.html'
order_by_file_fields = ('_file_size', 'original_filename', 'name', 'owner',
'uploaded_at', 'modified_at')
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
parent_id = request.GET.get('parent_id', None)
if not parent_id:
parent_id = request.POST.get('parent_id', None)
if parent_id:
return AddFolderPopupForm
else:
folder_form = super(FolderAdmin, self).get_form(
request, obj=None, **kwargs)
def folder_form_clean(form_obj):
cleaned_data = form_obj.cleaned_data
folders_with_same_name = Folder.objects.filter(
parent=form_obj.instance.parent,
name=cleaned_data['name'])
if form_obj.instance.pk:
folders_with_same_name = folders_with_same_name.exclude(
pk=form_obj.instance.pk)
if folders_with_same_name.exists():
raise ValidationError('Folder with this name already exists.')
return cleaned_data
# attach clean to the default form rather than defining a new form class
folder_form.clean = folder_form_clean
return folder_form
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
r = form.save(commit=False)
parent_id = request.GET.get('parent_id', None)
if not parent_id:
parent_id = request.POST.get('parent_id', None)
if parent_id:
parent = Folder.objects.get(id=parent_id)
r.parent = parent
return r
def response_change(self, request, obj):
"""
Overrides the default to be able to forward to the directory listing
instead of the default change_list_view
"""
r = super(FolderAdmin, self).response_change(request, obj)
## Code borrowed from django ModelAdmin to determine changelist on the fly
if r['Location']:
# it was a successful save
if (r['Location'] in ['../'] or
r['Location'] == self._get_post_url(obj)):
if obj.parent:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': obj.parent.id})
else:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url,popup_param(request),
selectfolder_param(request,"&"))
return HttpResponseRedirect(url)
else:
# this means it probably was a save_and_continue_editing
pass
return r
def render_change_form(self, request, context, add=False, change=False,
form_url='', obj=None):
extra_context = {'show_delete': True,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),}
context.update(extra_context)
return super(FolderAdmin, self).render_change_form(
request=request, context=context, add=False,
change=False, form_url=form_url, obj=obj)
def delete_view(self, request, object_id, extra_context=None):
"""
Overrides the default to enable redirecting to the directory view after
deletion of a folder.
        We need to fetch the object and find out who the parent is before
        calling super, because super will delete the object and make it
        impossible to find the parent folder to redirect to.
"""
parent_folder = None
try:
obj = self.get_queryset(request).get(pk=unquote(object_id))
parent_folder = obj.parent
except self.model.DoesNotExist:
obj = None
r = super(FolderAdmin, self).delete_view(
request=request, object_id=object_id,
extra_context=extra_context)
url = r.get("Location", None)
if url in ["../../../../", "../../"] or url == self._get_post_url(obj):
if parent_folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': parent_folder.id})
else:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request,"&"))
return HttpResponseRedirect(url)
return r
def icon_img(self, xs):
return mark_safe(('<img src="%simg/icons/plainfolder_32x32.png" ' + \
'alt="Folder Icon" />') % FILER_STATICMEDIA_PREFIX)
icon_img.allow_tags = True
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(FolderAdmin, self).get_urls()
from filer import views
url_patterns = patterns('',
# we override the default list view with our own directory listing
# of the root directories
url(r'^$',
self.admin_site.admin_view(self.directory_listing),
name='filer-directory_listing-root'),
url(r'^last/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'last'},
name='filer-directory_listing-last'),
url(r'^(?P<folder_id>\d+)/list/$',
self.admin_site.admin_view(self.directory_listing),
name='filer-directory_listing'),
url(r'^(?P<folder_id>\d+)/make_folder/$',
self.admin_site.admin_view(views.make_folder),
name='filer-directory_listing-make_folder'),
url(r'^make_folder/$',
self.admin_site.admin_view(views.make_folder),
name='filer-directory_listing-make_root_folder'),
url(r'^images_with_missing_data/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'images_with_missing_data'},
name='filer-directory_listing-images_with_missing_data'),
url(r'^unfiled_images/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'unfiled_images'},
name='filer-directory_listing-unfiled_images'),
)
url_patterns.extend(urls)
return url_patterns
# custom views
def directory_listing(self, request, folder_id=None, viewtype=None):
clipboard = tools.get_user_clipboard(request.user)
if viewtype == 'images_with_missing_data':
folder = ImagesWithMissingData()
elif viewtype == 'unfiled_images':
folder = UnfiledImages()
elif viewtype == 'last':
last_folder_id = request.session.get('filer_last_folder_id')
try:
Folder.objects.get(id=last_folder_id)
except Folder.DoesNotExist:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request,"&"))
else:
url = reverse('admin:filer-directory_listing', kwargs={'folder_id': last_folder_id})
url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request,"&"))
return HttpResponseRedirect(url)
elif folder_id is None:
folder = FolderRoot()
else:
folder = get_object_or_404(Folder, id=folder_id)
request.session['filer_last_folder_id'] = folder_id
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except ValueError:
pass
# search
q = request.GET.get('q', None)
if q:
search_terms = unquote(q).split(" ")
else:
search_terms = []
q = ''
limit_search_to_folder = request.GET.get('limit_search_to_folder',
False) in (True, 'on')
if len(search_terms) > 0:
if folder and limit_search_to_folder and not folder.is_root:
folder_qs = folder.get_descendants()
file_qs = File.objects.filter(
folder__in=folder.get_descendants())
else:
folder_qs = Folder.objects.all()
file_qs = File.objects.all()
folder_qs = self.filter_folder(folder_qs, search_terms)
file_qs = self.filter_file(file_qs, search_terms)
show_result_count = True
else:
folder_qs = folder.children.all()
file_qs = folder.files.all()
show_result_count = False
folder_qs = folder_qs.order_by('name')
order_by = request.GET.get('order_by', None)
if order_by is not None:
order_by = order_by.split(',')
order_by = [field for field in order_by
if re.sub(r'^-', '', field) in self.order_by_file_fields]
if len(order_by) > 0:
file_qs = file_qs.order_by(*order_by)
folder_children = []
folder_files = []
if folder.is_root:
folder_children += folder.virtual_folders
perms = FolderPermission.objects.get_read_id_list(request.user)
root_exclude_kw = {'parent__isnull': False, 'parent__id__in': perms}
if perms != 'All':
file_qs = file_qs.filter(models.Q(folder__id__in=perms) | models.Q(owner=request.user))
folder_qs = folder_qs.filter(models.Q(id__in=perms) | models.Q(owner=request.user))
else:
root_exclude_kw.pop('parent__id__in')
if folder.is_root:
folder_qs = folder_qs.exclude(**root_exclude_kw)
folder_children += folder_qs
folder_files += file_qs
try:
permissions = {
'has_edit_permission': folder.has_edit_permission(request),
'has_read_permission': folder.has_read_permission(request),
'has_add_children_permission': \
folder.has_add_children_permission(request),
}
        except Exception:  # e.g. virtual folders without permission methods
permissions = {}
if order_by is None or len(order_by) == 0:
folder_files.sort()
items = folder_children + folder_files
items_permissions = [(item, {'change': self.has_change_permission(request, item)}) for item in items]
paginator = Paginator(items_permissions, FILER_PAGINATE_BY)
# Are we moving to clipboard?
if request.method == 'POST' and '_save' not in request.POST:
for f in folder_files:
if "move-to-clipboard-%d" % (f.id,) in request.POST:
clipboard = tools.get_user_clipboard(request.user)
if f.has_edit_permission(request):
tools.move_file_to_clipboard([f], clipboard)
return HttpResponseRedirect(request.get_full_path())
else:
raise PermissionDenied
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, files_queryset=file_qs, folders_queryset=folder_qs)
if response:
return response
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, files_queryset=file_qs, folders_queryset=folder_qs)
if response:
return response
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', paginator.count)
# If page request (9999) is out of range, deliver last page of results.
try:
paginated_items = paginator.page(request.GET.get('page', 1))
except PageNotAnInteger:
paginated_items = paginator.page(1)
except EmptyPage:
paginated_items = paginator.page(paginator.num_pages)
context = admin_each_context(self.admin_site, request)
context.update({
'folder': folder,
'clipboard_files': File.objects.filter(
in_clipboards__clipboarditem__clipboard__user=request.user
).distinct(),
'paginator': paginator,
'paginated_items': paginated_items, # [(item, item_perms), ]
'permissions': permissions,
'permstest': userperms_for_request(folder, request),
'current_url': request.path,
'title': 'Directory listing for %s' % folder.name,
'search_string': ' '.join(search_terms),
'q': urlquote(q),
'show_result_count': show_result_count,
'limit_search_to_folder': limit_search_to_folder,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),
# needed in the admin/base.html template for logout links
'root_path': reverse('admin:index'),
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(paginated_items.object_list)},
'selection_note_all': selection_note_all % {'total_count': paginator.count},
'media': self.media,
'enable_permissions': settings.FILER_ENABLE_PERMISSIONS,
'can_make_folder': request.user.is_superuser or \
(folder.is_root and settings.FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS) or \
permissions.get("has_add_children_permission"),
})
return render(request, self.directory_listing_template, context)
def filter_folder(self, qs, terms=[]):
for term in terms:
filters = models.Q(name__icontains=term)
for filter_ in self.get_owner_filter_lookups():
filters |= models.Q(**{filter_: term})
qs = qs.filter(filters)
return qs
def filter_file(self, qs, terms=[]):
for term in terms:
filters = (models.Q(name__icontains=term) |
models.Q(description__icontains=term) |
models.Q(original_filename__icontains=term))
for filter_ in self.get_owner_filter_lookups():
filters |= models.Q(**{filter_: term})
qs = qs.filter(filters)
return qs
@property
def owner_search_fields(self):
"""
Returns all the fields that are CharFields except for password from the
User model. For the built-in User model, that means username,
first_name, last_name, and email.
"""
try:
from django.contrib.auth import get_user_model
except ImportError: # Django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
return [
field.name for field in User._meta.fields
if isinstance(field, models.CharField) and field.name != 'password'
]
def get_owner_filter_lookups(self):
return [
'owner__{field}__icontains'.format(field=field)
for field in self.owner_search_fields
]
def response_action(self, request, files_queryset, folders_queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
# If we didn't get an action from the chosen form that's invalid
# POST data, so by deleting action it'll fail the validation check
# below. So no need to do anything here
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
return None
if not select_across:
selected_files = []
selected_folders = []
for pk in selected:
if pk[:5] == "file-":
selected_files.append(pk[5:])
else:
selected_folders.append(pk[7:])
# Perform the action only on the selected objects
files_queryset = files_queryset.filter(pk__in=selected_files)
folders_queryset = folders_queryset.filter(pk__in=selected_folders)
response = func(self, request, files_queryset, folders_queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg)
return None
def get_actions(self, request):
actions = super(FolderAdmin, self).get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def move_to_clipboard(self, request, files_queryset, folders_queryset):
"""
Action which moves the selected files and files in selected folders to clipboard.
"""
if not self.has_change_permission(request):
raise PermissionDenied
if request.method != 'POST':
return None
clipboard = tools.get_user_clipboard(request.user)
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
# TODO: Display a confirmation page if moving more than X files to clipboard?
files_count = [0] # We define it like that so that we can modify it inside the move_files function
def move_files(files):
files_count[0] += tools.move_file_to_clipboard(files, clipboard)
def move_folders(folders):
for f in folders:
move_files(f.files)
move_folders(f.children.all())
move_files(files_queryset)
move_folders(folders_queryset)
self.message_user(request, _("Successfully moved %(count)d files to clipboard.") % {
"count": files_count[0],
})
return None
move_to_clipboard.short_description = ugettext_lazy("Move selected files to clipboard")
def files_set_public_or_private(self, request, set_public, files_queryset, folders_queryset):
"""
        Action which enables or disables permissions for the selected files and
        the files in the selected folders (sets them private or public).
"""
if not self.has_change_permission(request):
raise PermissionDenied
if request.method != 'POST':
return None
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
files_count = [0] # We define it like that so that we can modify it inside the set_files function
def set_files(files):
for f in files:
if f.is_public != set_public:
f.is_public = set_public
f.save()
files_count[0] += 1
def set_folders(folders):
for f in folders:
set_files(f.files)
set_folders(f.children.all())
set_files(files_queryset)
set_folders(folders_queryset)
if set_public:
self.message_user(request, _("Successfully disabled permissions for %(count)d files.") % {
"count": files_count[0],
})
else:
self.message_user(request, _("Successfully enabled permissions for %(count)d files.") % {
"count": files_count[0],
})
return None
def files_set_private(self, request, files_queryset, folders_queryset):
return self.files_set_public_or_private(request, False, files_queryset, folders_queryset)
files_set_private.short_description = ugettext_lazy("Enable permissions for selected files")
def files_set_public(self, request, files_queryset, folders_queryset):
return self.files_set_public_or_private(request, True, files_queryset, folders_queryset)
files_set_public.short_description = ugettext_lazy("Disable permissions for selected files")
def delete_files_or_folders(self, request, files_queryset, folders_queryset):
"""
Action which deletes the selected files and/or folders.
        This action first displays a confirmation page which shows all the
        deletable files and/or folders, or, if the user lacks permission on
        one of the related children (foreign keys), a "permission denied"
        message.
        Next, it deletes all selected files and/or folders and redirects back
        to the folder.
"""
opts = self.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not self.has_delete_permission(request):
raise PermissionDenied
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
all_protected = []
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
# Hopefully this also checks for necessary permissions.
# TODO: Check if permissions are really verified
using = router.db_for_write(self.model)
deletable_files, perms_needed_files, protected_files = get_deleted_objects(files_queryset, files_queryset.model._meta, request.user, self.admin_site, using)
deletable_folders, perms_needed_folders, protected_folders = get_deleted_objects(folders_queryset, folders_queryset.model._meta, request.user, self.admin_site, using)
all_protected.extend(protected_files)
all_protected.extend(protected_folders)
all_deletable_objects = [deletable_files, deletable_folders]
all_perms_needed = perms_needed_files.union(perms_needed_folders)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if all_perms_needed:
raise PermissionDenied
n = files_queryset.count() + folders_queryset.count()
if n:
# delete all explicitly selected files
for f in files_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all files in all selected folders and their children
# This would happen automatically by ways of the delete cascade, but then the individual .delete()
# methods won't be called and the files won't be deleted from the filesystem.
folder_ids = set()
for folder in folders_queryset:
folder_ids.add(folder.id)
folder_ids.update(folder.get_descendants().values_list('id', flat=True))
for f in File.objects.filter(folder__in=folder_ids):
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all folders
for f in folders_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
self.message_user(request, _("Successfully deleted %(count)d files and/or folders.") % {
"count": n,
})
# Return None to display the change list page again.
return None
if all_perms_needed or all_protected:
title = _("Cannot delete files and/or folders")
else:
title = _("Are you sure?")
context = admin_each_context(self.admin_site, request)
context.update({
"title": title,
"instance": current_folder,
"breadcrumbs_action": _("Delete files and/or folders"),
"deletable_objects": all_deletable_objects,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": all_perms_needed,
"protected": all_protected,
"opts": opts,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(request, "admin/filer/delete_selected_files_confirmation.html", context)
delete_files_or_folders.short_description = ugettext_lazy("Delete selected files and/or folders")
# Copied from django.contrib.admin.util
def _format_callback(self, obj, user, admin_site, perms_needed):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = get_delete_permission(opts)
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe('%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
def _check_copy_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
except PermissionDenied:
return True
return False
def _check_move_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
except PermissionDenied:
return True
return False
def _get_current_action_folder(self, request, files_queryset, folders_queryset):
if files_queryset:
return files_queryset[0].folder
elif folders_queryset:
return folders_queryset[0].parent
else:
return None
def _list_folders_to_copy_or_move(self, request, folders):
for fo in folders:
yield self._format_callback(fo, request.user, self.admin_site, set())
children = list(self._list_folders_to_copy_or_move(request, fo.children.all()))
children.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(fo.files)])
if children:
yield children
def _list_all_to_copy_or_move(self, request, files_queryset, folders_queryset):
to_copy_or_move = list(self._list_folders_to_copy_or_move(request, folders_queryset))
to_copy_or_move.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(files_queryset)])
return to_copy_or_move
def _list_all_destination_folders_recursive(self, request, folders_queryset, current_folder, folders, allow_self, level):
for fo in folders:
if not allow_self and fo in folders_queryset:
# We do not allow moving to selected folders or their descendants
continue
if not fo.has_read_permission(request):
continue
# We do not allow copying/moving back to the folder itself
enabled = (allow_self or fo != current_folder) and fo.has_add_children_permission(request)
yield (fo, (mark_safe((" " * level) + force_text(fo)), enabled))
for c in self._list_all_destination_folders_recursive(request, folders_queryset, current_folder, fo.children.all(), allow_self, level + 1):
yield c
def _list_all_destination_folders(self, request, folders_queryset, current_folder, allow_self):
return list(self._list_all_destination_folders_recursive(request, folders_queryset, current_folder, FolderRoot().children, allow_self, 0))
def _move_files_and_folders_impl(self, files_queryset, folders_queryset, destination):
for f in files_queryset:
f.folder = destination
f.save()
for f in folders_queryset:
f.move_to(destination, 'last-child')
f.save()
def move_files_and_folders(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_move_perms(request, files_queryset, folders_queryset)
to_move = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
folders = self._list_all_destination_folders(request, folders_queryset, current_folder, False)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
try:
destination = Folder.objects.get(pk=request.POST.get('destination'))
except Folder.DoesNotExist:
raise PermissionDenied
folders_dict = dict(folders)
if destination not in folders_dict or not folders_dict[destination][1]:
raise PermissionDenied
# We count only topmost files and folders here
n = files_queryset.count() + folders_queryset.count()
conflicting_names = [folder.name for folder in Folder.objects.filter(
parent=destination,
name__in=folders_queryset.values('name'))]
if conflicting_names:
messages.error(request, _("Folders with names %s already exist at the selected "
"destination") % ", ".join(conflicting_names))
elif n:
self._move_files_and_folders_impl(files_queryset, folders_queryset, destination)
self.message_user(request, _("Successfully moved %(count)d files and/or folders to folder '%(destination)s'.") % {
"count": n,
"destination": destination,
})
return None
context = admin_each_context(self.admin_site, request)
context.update({
"title": _("Move files and/or folders"),
"instance": current_folder,
"breadcrumbs_action": _("Move files and/or folders"),
"to_move": to_move,
"destination_folders": folders,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(request, "admin/filer/folder/choose_move_destination.html", context)
move_files_and_folders.short_description = ugettext_lazy("Move selected files and/or folders")
def _rename_file(self, file_obj, form_data, counter, global_counter):
original_basename, original_extension = os.path.splitext(file_obj.original_filename)
if file_obj.name:
current_basename, current_extension = os.path.splitext(file_obj.name)
else:
current_basename = ""
current_extension = ""
file_obj.name = form_data['rename_format'] % {
'original_filename': file_obj.original_filename,
'original_basename': original_basename,
'original_extension': original_extension,
'current_filename': file_obj.name or "",
'current_basename': current_basename,
'current_extension': current_extension,
'current_folder': file_obj.folder.name,
'counter': counter + 1, # 1-based
'global_counter': global_counter + 1, # 1-based
}
file_obj.save()
def _rename_files(self, files, form_data, global_counter):
n = 0
for f in sorted(files):
self._rename_file(f, form_data, n, global_counter + n)
n += 1
return n
def _rename_folder(self, folder, form_data, global_counter):
return self._rename_files_impl(folder.files.all(), folder.children.all(), form_data, global_counter)
def _rename_files_impl(self, files_queryset, folders_queryset, form_data, global_counter):
n = 0
for f in folders_queryset:
n += self._rename_folder(f, form_data, global_counter + n)
n += self._rename_files(files_queryset, form_data, global_counter + n)
return n
def rename_files(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_move_perms(request, files_queryset, folders_queryset)
to_rename = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = RenameFilesForm(request.POST)
if form.is_valid():
if files_queryset.count() + folders_queryset.count():
n = self._rename_files_impl(files_queryset, folders_queryset, form.cleaned_data, 0)
self.message_user(request, _("Successfully renamed %(count)d files.") % {
"count": n,
})
return None
else:
form = RenameFilesForm()
context = admin_each_context(self.admin_site, request)
context.update({
"title": _("Rename files"),
"instance": current_folder,
"breadcrumbs_action": _("Rename files"),
"to_rename": to_rename,
"rename_form": form,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the rename format selection page
return render(request, "admin/filer/folder/choose_rename_format.html", context)
rename_files.short_description = ugettext_lazy("Rename files")
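    # Example rename format (illustrative): given the keys documented in
    # _rename_file above, a format such as
    #   '%(current_folder)s_%(counter)03d%(original_extension)s'
    # would rename files to '<folder>_001.jpg', '<folder>_002.jpg', ...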
def _generate_new_filename(self, filename, suffix):
basename, extension = os.path.splitext(filename)
return basename + suffix + extension
def _copy_file(self, file_obj, destination, suffix, overwrite):
if overwrite:
# Not yet implemented as we have to find a portable (for different storage backends) way to overwrite files
raise NotImplementedError
        # We are assuming here that we are operating on already saved database
        # objects with the current database state available
filename = self._generate_new_filename(file_obj.file.name, suffix)
# Due to how inheritance works, we have to set both pk and id to None
file_obj.pk = None
file_obj.id = None
file_obj.save()
file_obj.folder = destination
file_obj.file = file_obj._copy_file(filename)
file_obj.original_filename = self._generate_new_filename(file_obj.original_filename, suffix)
file_obj.save()
def _copy_files(self, files, destination, suffix, overwrite):
for f in files:
self._copy_file(f, destination, suffix, overwrite)
return len(files)
def _get_available_name(self, destination, name):
count = itertools.count(1)
original = name
while destination.contains_folder(name):
name = "%s_%s" % (original, next(count))
return name
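    # e.g. if destination already contains 'photos' and 'photos_1', then
    # _get_available_name(destination, 'photos') returns 'photos_2'.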
def _copy_folder(self, folder, destination, suffix, overwrite):
if overwrite:
# Not yet implemented as we have to find a portable (for different storage backends) way to overwrite files
raise NotImplementedError
# TODO: Should we also allow not to overwrite the folder if it exists, but just copy into it?
# TODO: Is this a race-condition? Would this be a problem?
foldername = self._get_available_name(destination, folder.name)
old_folder = Folder.objects.get(pk=folder.pk)
# Due to how inheritance works, we have to set both pk and id to None
folder.pk = None
folder.id = None
folder.name = foldername
folder.insert_at(destination, 'last-child', True) # We save folder here
for perm in FolderPermission.objects.filter(folder=old_folder):
perm.pk = None
perm.id = None
perm.folder = folder
perm.save()
return 1 + self._copy_files_and_folders_impl(old_folder.files.all(), old_folder.children.all(), folder, suffix, overwrite)
def _copy_files_and_folders_impl(self, files_queryset, folders_queryset, destination, suffix, overwrite):
n = self._copy_files(files_queryset, destination, suffix, overwrite)
for f in folders_queryset:
n += self._copy_folder(f, destination, suffix, overwrite)
return n
def copy_files_and_folders(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_copy_perms(request, files_queryset, folders_queryset)
to_copy = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
folders = self._list_all_destination_folders(request, folders_queryset, current_folder, False)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = CopyFilesAndFoldersForm(request.POST)
if form.is_valid():
try:
destination = Folder.objects.get(pk=request.POST.get('destination'))
except Folder.DoesNotExist:
raise PermissionDenied
folders_dict = dict(folders)
if destination not in folders_dict or not folders_dict[destination][1]:
raise PermissionDenied
if files_queryset.count() + folders_queryset.count():
                    # We count all files and folders here (recursively)
n = self._copy_files_and_folders_impl(files_queryset, folders_queryset, destination, form.cleaned_data['suffix'], False)
self.message_user(request, _("Successfully copied %(count)d files and/or folders to folder '%(destination)s'.") % {
"count": n,
"destination": destination,
})
return None
else:
form = CopyFilesAndFoldersForm()
try:
selected_destination_folder = int(request.POST.get('destination', 0))
except ValueError:
if current_folder:
selected_destination_folder = current_folder.pk
else:
selected_destination_folder = 0
context = admin_each_context(self.admin_site, request)
context.update({
"title": _("Copy files and/or folders"),
"instance": current_folder,
"breadcrumbs_action": _("Copy files and/or folders"),
"to_copy": to_copy,
"destination_folders": folders,
"selected_destination_folder": selected_destination_folder,
"copy_form": form,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(request, "admin/filer/folder/choose_copy_destination.html", context)
copy_files_and_folders.short_description = ugettext_lazy("Copy selected files and/or folders")
def _check_resize_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
check_files_edit_permissions(request, files_queryset)
except PermissionDenied:
return True
return False
def _list_folders_to_resize(self, request, folders):
for fo in folders:
children = list(self._list_folders_to_resize(request, fo.children.all()))
children.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(fo.files) if isinstance(f, Image)])
if children:
yield self._format_callback(fo, request.user, self.admin_site, set())
yield children
def _list_all_to_resize(self, request, files_queryset, folders_queryset):
to_resize = list(self._list_folders_to_resize(request, folders_queryset))
to_resize.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(files_queryset) if isinstance(f, Image)])
return to_resize
def _new_subject_location(self, original_width, original_height, new_width, new_height, x, y, crop):
# TODO: We could probably do better
return (round(new_width / 2), round(new_height / 2))
def _resize_image(self, image, form_data):
original_width = float(image.width)
original_height = float(image.height)
thumbnailer = FilerActionThumbnailer(file=image.file.file, name=image.file.name, source_storage=image.file.source_storage, thumbnail_storage=image.file.source_storage)
# This should overwrite the original image
new_image = thumbnailer.get_thumbnail({
'size': (form_data['width'], form_data['height']),
'crop': form_data['crop'],
'upscale': form_data['upscale'],
'subject_location': image.subject_location,
})
image.file.file = new_image.file
image.generate_sha1()
image.save() # Also gets new width and height
subject_location = normalize_subject_location(image.subject_location)
if subject_location:
(x, y) = subject_location
x = float(x)
y = float(y)
new_width = float(image.width)
new_height = float(image.height)
(new_x, new_y) = self._new_subject_location(original_width, original_height, new_width, new_height, x, y, form_data['crop'])
image.subject_location = "%d,%d" % (new_x, new_y)
image.save()
def _resize_images(self, files, form_data):
n = 0
for f in files:
if isinstance(f, Image):
self._resize_image(f, form_data)
n += 1
return n
def _resize_folder(self, folder, form_data):
return self._resize_images_impl(folder.files.all(), folder.children.all(), form_data)
def _resize_images_impl(self, files_queryset, folders_queryset, form_data):
n = self._resize_images(files_queryset, form_data)
for f in folders_queryset:
n += self._resize_folder(f, form_data)
return n
def resize_images(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_resize_perms(request, files_queryset, folders_queryset)
to_resize = self._list_all_to_resize(request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = ResizeImagesForm(request.POST)
if form.is_valid():
if form.cleaned_data.get('thumbnail_option'):
form.cleaned_data['width'] = form.cleaned_data['thumbnail_option'].width
form.cleaned_data['height'] = form.cleaned_data['thumbnail_option'].height
form.cleaned_data['crop'] = form.cleaned_data['thumbnail_option'].crop
form.cleaned_data['upscale'] = form.cleaned_data['thumbnail_option'].upscale
if files_queryset.count() + folders_queryset.count():
                    # We count all files here (recursively)
n = self._resize_images_impl(files_queryset, folders_queryset, form.cleaned_data)
self.message_user(request, _("Successfully resized %(count)d images.") % {
"count": n,
})
return None
else:
form = ResizeImagesForm()
context = admin_each_context(self.admin_site, request)
context.update({
"title": _("Resize images"),
"instance": current_folder,
"breadcrumbs_action": _("Resize images"),
"to_resize": to_resize,
"resize_form": form,
"cmsplugin_enabled": 'cmsplugin_filer_image' in django_settings.INSTALLED_APPS,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the resize options page
return render(request, "admin/filer/folder/choose_images_resize_options.html", context)
resize_images.short_description = ugettext_lazy("Resize selected images")
| o-zander/django-filer | filer/admin/folderadmin.py | Python | bsd-3-clause | 53,477 |
import sys
import time
import json
import logging
import random
import tornado.options
from tornado.options import define, options
from tornado import gen
define('srp_root',default='http://192.168.56.1')
#define('srp_root',default='https://remote-staging.utorrent.com')
#define('srp_root',default='https://remote.utorrent.com')
define('debug',default=True)
define('verbose',default=1, type=int)
tornado.options.parse_command_line()
if options.debug:
import pdb
import tornado.ioloop
from falcon_api.session import Session
from falcon_api.util import asyncsleep
from falcon_api.classic import Client
import tornado.httpclient
httpclient = tornado.httpclient.AsyncHTTPClient(force_instance=True, max_clients=1)
@gen.engine
def test_login():
username = sys.argv[1]
password = sys.argv[2]
# check result..
#torrent = 'http://www.clearbits.net/get/503-control-alt-deus---made-of-fire.torrent'
hash = ''.join([random.choice( list('abcdef') + map(str,range(10)) ) for _ in range(40)])
torrent = 'magnet:?xt=urn:btih:%s' % hash
for _ in range(1):
client = Client(username, password)
client.sync()
yield gen.Task( asyncsleep, 1 )
#client.add_url(torrent)
client.stop()
tasks = []
for hash, torrent in client.torrents.items():
if torrent.get('progress') == 1000:
tasks.append( gen.Task( torrent.fetch_files ) )
tasks.append( gen.Task( torrent.fetch_metadata ) )
responses = yield gen.Multi( tasks )
logging.info('responses %s' % [r.code for r in responses])
tasks = []
for hash, torrent in client.torrents.items():
if torrent.get('progress') == 1000:
for file in torrent.files:
link = file.webseed_link()
print link
request = tornado.httpclient.HTTPRequest(link,
validate_cert=False)
tasks.append( gen.Task( httpclient.fetch, request ) )
while tasks:
        some_tasks = [tasks.pop() for _ in range(min(5, len(tasks)))]  # avoid popping an empty list
logging.info('executing tasks of len %s' % len(some_tasks))
responses = yield gen.Multi( some_tasks )
logging.info('responses %s' % [(r.code, len(r.body)) for r in responses])
if False:
tasks = []
for hash, torrent in client.torrents.items():
if torrent.get('progress') == 1000:
link = torrent.webseed_link()
print torrent.get('name'), torrent.get('progress'), link
request = tornado.httpclient.HTTPRequest(link,
validate_cert=False)
tasks.append( gen.Task( httpclient.fetch, request ) )
responses = yield gen.Multi( tasks )
logging.info('responses %s' % [r.code for r in responses])
if __name__ == '__main__':
ioloop = tornado.ioloop.IOLoop.instance()
test_login()
ioloop.start()
| leiferikb/bitpop-private | bitpop_specific/extensions/bittorrent_surf/app/lib/falcon-api/python/falcon_api/test/classic.py | Python | bsd-3-clause | 2,962 |
from __future__ import absolute_import
import os
import time
from math import pi
import numpy as nm
from sfepy.base.base import Struct, output, get_default
from sfepy.applications import PDESolverApp
from sfepy.solvers import Solver
from six.moves import range
def guess_n_eigs(n_electron, n_eigs=None):
"""
Guess the number of eigenvalues (energies) to compute so that the smearing
iteration converges. Passing n_eigs overrides the guess.
"""
if n_eigs is not None: return n_eigs
if n_electron > 2:
n_eigs = int(1.2 * ((0.5 * n_electron) + 5))
else:
n_eigs = n_electron
return n_eigs
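# e.g. guess_n_eigs(10) returns int(1.2 * (0.5 * 10 + 5)) == 12, while
# guess_n_eigs(10, n_eigs=4) simply returns 4 and guess_n_eigs(2) returns 2.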
class SchroedingerApp(PDESolverApp):
"""
Base application for electronic structure calculations.
Subclasses should typically override `solve_eigen_problem()` method.
This class allows solving only simple single electron problems,
e.g. well, oscillator, hydrogen atom and boron atom with 1 electron.
"""
@staticmethod
def process_options(options):
"""
Application options setup. Sets default values for missing
non-compulsory options.
Options:
save_eig_vectors : (from_largest, from_smallest) or None
If None, save all.
"""
get = options.get
n_electron = get('n_electron', 5)
n_eigs = guess_n_eigs(n_electron, n_eigs=get('n_eigs', None))
return Struct(eigen_solver=get('eigen_solver', None,
'missing "eigen_solver" in options!'),
n_electron=n_electron,
n_eigs=n_eigs,
save_eig_vectors=get('save_eig_vectors', None))
def __init__(self, conf, options, output_prefix, **kwargs):
PDESolverApp.__init__(self, conf, options, output_prefix,
init_equations=False)
def setup_options(self):
PDESolverApp.setup_options(self)
opts = SchroedingerApp.process_options(self.conf.options)
self.app_options += opts
def setup_output(self):
"""
Setup various file names for the output directory given by
`self.problem.output_dir`.
"""
output_dir = self.problem.output_dir
opts = self.app_options
opts.output_dir = output_dir
self.mesh_results_name = os.path.join(opts.output_dir,
self.problem.get_output_name())
self.eig_results_name = os.path.join(opts.output_dir,
self.problem.ofn_trunk
+ '_eigs.txt')
def call(self):
# This cannot be in __init__(), as parametric calls may change
# the output directory.
self.setup_output()
evp = self.solve_eigen_problem()
output("solution saved to %s" % self.problem.get_output_name())
output("in %s" % self.app_options.output_dir)
if self.post_process_hook_final is not None: # User postprocessing.
self.post_process_hook_final(self.problem, evp=evp)
return evp
def solve_eigen_problem(self):
options = self.options
opts = self.app_options
pb = self.problem
dim = pb.domain.mesh.dim
pb.set_equations(pb.conf.equations)
pb.time_update()
output('assembling lhs...')
tt = time.clock()
mtx_a = pb.evaluate(pb.conf.equations['lhs'], mode='weak',
auto_init=True, dw_mode='matrix')
output('...done in %.2f s' % (time.clock() - tt))
output('assembling rhs...')
tt = time.clock()
mtx_b = pb.evaluate(pb.conf.equations['rhs'], mode='weak',
dw_mode='matrix')
output('...done in %.2f s' % (time.clock() - tt))
n_eigs = get_default(opts.n_eigs, mtx_a.shape[0])
output('computing resonance frequencies...')
eig = Solver.any_from_conf(pb.get_solver_conf(opts.eigen_solver))
eigs, mtx_s_phi = eig(mtx_a, mtx_b, n_eigs, eigenvectors=True)
output('...done')
bounding_box = pb.domain.mesh.get_bounding_box()
# this assumes a box (3D), or a square (2D):
a = bounding_box[1][0] - bounding_box[0][0]
E_exact = None
if options.hydrogen or options.boron:
if options.hydrogen:
Z = 1
elif options.boron:
Z = 5
if dim == 2:
E_exact = [-float(Z)**2/2/(n-0.5)**2/4
for n in [1]+[2]*3+[3]*5 + [4]*8 + [5]*15]
elif dim == 3:
E_exact = [-float(Z)**2/2/n**2 for n in [1]+[2]*2**2+[3]*3**2 ]
if options.well:
if dim == 2:
E_exact = [pi**2/(2*a**2)*x
for x in [2, 5, 5, 8, 10, 10, 13, 13,
17, 17, 18, 20, 20 ] ]
elif dim == 3:
E_exact = [pi**2/(2*a**2)*x
for x in [3, 6, 6, 6, 9, 9, 9, 11, 11,
11, 12, 14, 14, 14, 14, 14,
14, 17, 17, 17] ]
if options.oscillator:
if dim == 2:
E_exact = [1] + [2]*2 + [3]*3 + [4]*4 + [5]*5 + [6]*6
elif dim == 3:
E_exact = [float(1)/2+x for x in [1]+[2]*3+[3]*6+[4]*10 ]
if E_exact is not None:
output("a=%f" % a)
output("Energies:")
output("n exact FEM error")
for i, e in enumerate(eigs):
from numpy import NaN
if i < len(E_exact):
exact = E_exact[i]
err = 100*abs((exact - e)/exact)
else:
exact = NaN
err = NaN
output("%d: %.8f %.8f %5.2f%%" % (i, exact, e, err))
else:
output(eigs)
mtx_phi = self.make_full(mtx_s_phi)
self.save_results(eigs, mtx_phi)
return Struct(pb=pb, eigs=eigs, mtx_phi=mtx_phi)
def make_full(self, mtx_s_phi):
variables = self.problem.get_variables()
mtx_phi = nm.empty((variables.di.ptr[-1], mtx_s_phi.shape[1]),
dtype=nm.float64)
for ii in range(mtx_s_phi.shape[1]):
mtx_phi[:,ii] = variables.make_full_vec(mtx_s_phi[:,ii])
return mtx_phi
def save_results(self, eigs, mtx_phi, out=None,
mesh_results_name=None, eig_results_name=None):
mesh_results_name = get_default(mesh_results_name,
self.mesh_results_name)
eig_results_name = get_default(eig_results_name,
self.eig_results_name)
pb = self.problem
save = self.app_options.save_eig_vectors
n_eigs = self.app_options.n_eigs
out = get_default(out, {})
state = pb.create_state()
aux = {}
for ii in range(eigs.shape[0]):
if save is not None:
if (ii > save[0]) and (ii < (n_eigs - save[1])): continue
state.set_full(mtx_phi[:,ii])
aux = state.create_output_dict()
key = list(aux.keys())[0]
out[key+'%03d' % ii] = aux[key]
if aux.get('__mesh__') is not None:
out['__mesh__'] = aux['__mesh__']
pb.save_state(mesh_results_name, out=out)
        with open(eig_results_name, 'w') as fd:
            eigs.tofile(fd, ' ')
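# A minimal, self-contained sketch of the generalized eigenvalue step above
# (``eigs, mtx_s_phi = eig(mtx_a, mtx_b, n_eigs, eigenvectors=True)``),
# assuming small dense symmetric matrices and SciPy; the real code goes
# through SfePy's Solver abstraction, typically with sparse matrices.
def _demo_generalized_eigensolve(n=4):
    import numpy
    from scipy.linalg import eigh
    rng = numpy.random.RandomState(0)
    m = rng.rand(n, n)
    mtx_a = m + m.T + n * numpy.eye(n)  # symmetric, "stiffness"-like lhs
    mtx_b = numpy.eye(n)                # trivial "mass"-like rhs
    # eigh solves A x = lambda B x for symmetric A, positive-definite B;
    # eigenvalues come back sorted ascending, eigenvectors as columns.
    eigs, mtx_s_phi = eigh(mtx_a, mtx_b)
    return eigs, mtx_s_phi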
|
lokik/sfepy
|
sfepy/physics/schroedinger_app.py
|
Python
|
bsd-3-clause
| 7,582
|
"""
calabar.tunnels
This module encapsulates various tunnel processes and their management.
"""
import signal
import os
import sys
import psi.process
TUN_TYPE_STR = 'tunnel_type' # Configuration/dictionary key for the type of tunnel
# Should match the tunnel_type argument to Tunnel __init__ methods
PROC_NOT_RUNNING = [
psi.process.PROC_STATUS_DEAD,
psi.process.PROC_STATUS_ZOMBIE,
psi.process.PROC_STATUS_STOPPED
]
def is_really_running(tunnel):
pt = psi.process.ProcessTable()
try:
proc = pt.get(tunnel.proc.pid, None)
except AttributeError:
# we might not actually have a tunnel.proc or it might poof while we're checking
return False
if proc:
status = proc.status
        if status not in PROC_NOT_RUNNING:
return True
return False
class TunnelsAlreadyLoadedException(Exception):
"""Once tunnels are loaded the first time, other methods must be used to
update them"""
pass
class ExecutableNotFound(Exception):
"""
The given tunnel executable wasn't found or isn't executable.
"""
pass
class TunnelTypeDoesNotMatch(Exception):
"""
The given ``tun_type`` doesn't match expected Tunnel.
"""
pass
class TunnelManager(object):
"""
A class for working with multiple :class:`calabar.tunnels.base.TunnelBase`
tunnels.
    Creating this manager registers it for SIGCHLD signals, so only ONE
    TunnelManager can exist at a time for the purposes of keeping its tunnels
    running.
"""
def __init__(self):
self.tunnels = []
self._register_for_close()
def load_tunnels(self, config):
"""
Load config information to create all required tunnels.
"""
if self.tunnels:
raise TunnelsAlreadyLoadedException("TunnelManager.load_tunnels can't be called after tunnels have already been loaded. Use update_tunnels() instead")
tun_confs_d = get_tunnels(config)
for name, tun_conf_d in tun_confs_d.items():
t = self._load_tunnel(name, tun_conf_d)
self.tunnels.append(t)
def _load_tunnel(self, tunnel_name, tun_conf_d):
"""
Create and return a tunnel instance from a ``tun_conf_d`` dictionary.
        ``tun_conf_d`` is a dictionary matching the output of a tunnel's
        implementation of the
        :meth:`calabar.tunnels.base.TunnelBase.parse_configuration` method.
"""
from calabar.conf import TUNNELS
tun_type = tun_conf_d[TUN_TYPE_STR]
for tunnel in TUNNELS:
if tunnel.TUNNEL_TYPE == tun_type:
t = tunnel(name=tunnel_name, **tun_conf_d)
return t
        raise NotImplementedError("The tunnel type [%s] isn't supported" % tun_type)
def start_tunnels(self):
"""
Start all of the configured tunnels and register to keep them running.
"""
for t in self.tunnels:
try:
t.open()
except ExecutableNotFound, e:
print >> sys.stderr, e
def continue_tunnels(self):
"""
Ensure that all of the tunnels are still running.
"""
for t in self.tunnels:
if not t.is_running():
print "TUNNEL [%s] EXITED" % t.name
print "RESTARTING"
try:
t.open()
except ExecutableNotFound, e:
print >> sys.stderr, e
else:
print "[%s]:%s running" % (t.name, t.proc.pid)
def _register_for_close(self):
"""
        Register for child tunnel process close events. This keeps child
        processes from becoming defunct.
"""
signal.signal(signal.SIGCHLD, self._handle_child_close)
# Register for a termination signal so we can clean up children
signal.signal(signal.SIGTERM, self._handle_terminate)
def _handle_terminate(self, signum, frame):
for t in self.tunnels:
t.close(wait=False)
        sys.exit()
def _handle_child_close(self, signum, frame):
"""
Handle a closed child.
        Call :func:`os.wait` on the process so that it's not defunct.
"""
assert signum == signal.SIGCHLD
print "CHILD TUNNEL CLOSED"
pid, exit_status = os.wait()
for t in self.tunnels:
# For all of the "closing" tunnels, if they've stopped running, handle the close
if t.closing and not t.is_running():
# Assume the same exit_status
t.handle_closed(exit_status)
TUNNEL_PREFIX = 'tunnel:'
def get_tunnels(config):
"""
    Return a dictionary of dictionaries containing tunnel configurations based on the
given SafeConfigParser instance.
An example return value might be::
{
'foo':
{
'tunnel_type': 'vpnc',
'conf_file': '/etc/calabar/foo.conf',
'ips': [10.10.254.1]
},
'bar':
{
'tunnel_type': 'ssh',
                'from': 'root@10.10.251.2:386',
                'to': '127.0.0.1:387'
}
}
"""
tun_confs_d = {}
for section in config.sections():
if section.startswith(TUNNEL_PREFIX):
tun_conf_d = parse_tunnel(config, section)
tun_name = section[len(TUNNEL_PREFIX):]
tun_confs_d[tun_name] = tun_conf_d
return tun_confs_d
def parse_tunnel(config, section):
"""
Parse the given ``section`` in the given ``config``
:mod:`ConfigParser.ConfigParser` object to generate a tunnel configuration
dictionary using all configured tunnel types and their configuration
parsers.
"""
from calabar.conf import TUNNELS
tun_type = config.get(section, TUN_TYPE_STR)
for tunnel in TUNNELS:
if tun_type == tunnel.TUNNEL_TYPE:
tun_conf_d = tunnel.parse_configuration(config, section)
return tun_conf_d
raise NotImplementedError("The tunnel type [%s] isn't supported" % tun_type)
|
winhamwr/calabar
|
calabar/tunnels/__init__.py
|
Python
|
bsd-3-clause
| 6,085
|
"""
This package contains the test modules for the repository app of the ReFlow project,
organized by test type (unit, integration, etc.)
To run all the tests in the repository app, use the manage.py command:
"python manage.py test repository".
Notes:
- add new test constants in the constants module
"""
__author__ = 'swhite'
from unit_tests import *
from integration_tests import *
|
whitews/ReFlow
|
repository/tests/test.py
|
Python
|
bsd-3-clause
| 391
|
from django.conf.urls.defaults import *
from availablejob.views import *
urlpatterns = patterns("availablejob.views",
url(r"^sendcv/$",send_cv),
url(r"^detail/(?P<id>\d+)/$", 'detail',name="job-detail"),
url(r"^apply/(?P<id>\d+)/$", 'show_form',name="show-form"),
url(r"^$", 'index', name="vacancy-index"),
)
|
interalia/cmsplugin_availablejobs
|
availablejob/urls.py
|
Python
|
bsd-3-clause
| 330
|
# -*- coding: utf-8 -*-
"""
Image processing and feature extraction functions.
"""
import cv2
import numpy as np
def pad_image(im, width, height, border=255):
"""pad char image in a larger image"""
xoff = abs(int((im.shape[1] - width) / 2))
yoff = abs(int((im.shape[0] - height) / 2))
if width >= im.shape[1]:
x_min_old = 0
x_max_old = im.shape[1]
x_min_new = xoff
x_max_new = im.shape[1] + xoff
else:
x_min_old = xoff
x_max_old = width + xoff
x_min_new = 0
x_max_new = width
if height >= im.shape[0]:
y_min_old = 0
y_max_old = im.shape[0]
y_min_new = yoff
y_max_new = im.shape[0] + yoff
else:
y_min_old = yoff
y_max_old = height + yoff
y_min_new = 0
y_max_new = height
image_subset = im[y_min_old:y_max_old, x_min_old:x_max_old]
new_bmp = np.ones((height, width, 3), dtype=np.uint8) * border
new_bmp[y_min_new:y_max_new, x_min_new:x_max_new] = image_subset
return new_bmp
def transform_random(image, trans_size, rot_size, scale_size):
"""apply a small random transformation to an image"""
# TODO: make ranges of random numbers input parameters
trans = (np.random.rand(2) - 0.5) * np.array(trans_size)
rot = (np.random.rand(4) - 0.5) * rot_size
scale = 1.0 + scale_size * (np.random.rand(1)[0] - 0.5)
x_size = image.shape[1]
y_size = image.shape[0]
trans_to_center = np.float32(
[[1, 0, -x_size / 2.0],
[0, 1, -y_size / 2.0],
[0, 0, 1]])
trans_from_center = np.float32(
[[1, 0, x_size / 2.0],
[0, 1, y_size / 2.0],
[0, 0, 1]])
trans_random = np.float32(
[[1 + rot[0], 0 + rot[1], trans[0]],
[0 + rot[2], 1 + rot[3], trans[1]],
[0, 0, 1]])
trans_scale = np.identity(3, dtype=np.float32) * scale
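    # matrices compose right-to-left: shift the origin to the image center,
    # apply the random affine, then scaling, then shift back; warpAffine
    # wants only the top two rows of the 3x3 result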
tmat = np.dot(trans_from_center, np.dot(trans_scale, np.dot(trans_random, trans_to_center)))[0:2, :]
image_new = cv2.warpAffine(
image, tmat,
(image.shape[1], image.shape[0]),
borderValue=(255, 255, 255))
# cv2.imshow("image", image)
# cv2.imshow("new_image", image_new)
# cv2.waitKey()
return image_new
def filter_cc(image):
"""find connected components in a threshold image and white out
everything except the second largest"""
# TODO: better way to select relevant components
comp_filt = np.copy(image)
gray = 255 - np.array(np.sum(image, axis=2) / 3.0, dtype=np.uint8)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
connectivity = 4
comps = cv2.connectedComponentsWithStats(thresh, connectivity, cv2.CV_32S)
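    # comps is (n_labels, label_image, stats, centroids); the stats column
    # cv2.CC_STAT_AREA holds each component's area in pixels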
labels = comps[1]
sizes = comps[2][:, cv2.CC_STAT_AREA]
# get index of second-largest component
if len(sizes) > 1:
second_largest_idx = np.argsort(sizes)[-2]
else:
second_largest_idx = np.argsort(sizes)[-1]
# eliminate everything else
for label_idx in range(len(sizes)):
if label_idx != second_largest_idx:
comp_filt[labels == label_idx] = 255
# cv2.imshow("image", image)
# cv2.imshow("gray", gray)
# cv2.imshow("thresh", thresh)
# cv2.imshow("comp_filt", comp_filt)
# cv2.waitKey()
return comp_filt
def align(image, x_align=True, y_align=True):
"""shift an image so the center of mass of the pixels is centered"""
# TODO: this should just operate on grayscale
gray = 255 - np.array(np.sum(image, axis=2) / 3.0, dtype=np.uint8)
if x_align:
x_size = image.shape[1]
x_mean = np.sum(np.sum(gray, axis=0) * np.arange(x_size)) / np.sum(gray)
x_shift = x_size / 2.0 - x_mean
else:
x_shift = 0.0
if y_align:
y_size = image.shape[0]
y_mean = np.sum(np.sum(gray, axis=1) * np.arange(y_size)) / np.sum(gray)
y_shift = y_size / 2.0 - y_mean
else:
y_shift = 0.0
tmat = np.float32(
[[1, 0, x_shift],
[0, 1, y_shift]])
new_image = cv2.warpAffine(
image, tmat, (image.shape[1], image.shape[0]), borderValue=(255, 255, 255))
# cv2.imshow("image", image)
# cv2.imshow("new_image", new_image)
# cv2.waitKey()
return new_image
def grayscale(image):
"""convert RGB ubyte image to grayscale"""
return np.sum(image, axis=2) / 3.0
def downsample(image, scale_factor):
"""downsample an image and unravel to create a feature vector"""
    # cv2.resize takes dsize as (width, height), i.e. (cols, rows)
    feats = cv2.resize(
        image,
        (int(image.shape[1] * scale_factor),
         int(image.shape[0] * scale_factor)))
return feats
def downsample_4(image):
"""create a feature vector from four downsampling amounts"""
return downsample_multi(image, [0.4, 0.2, 0.1, 0.05])
def downsample_multi(image, scales):
"""create a feature vector from arbitrary downsampling amounts"""
return np.hstack([np.ravel(downsample(image, x)) for x in scales])
def max_pool(im):
"""perform 2x2 max pooling"""
return np.max(
np.stack(
(im[0::2, 0::2],
im[0::2, 1::2],
im[1::2, 0::2],
im[1::2, 1::2]),
axis=-1),
axis=-1)
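# A small hand-checked example of max_pool(): each output pixel is the max
# over one non-overlapping 2x2 block of the input.
def _demo_max_pool():
    im = np.array([[1, 2, 5, 6],
                   [3, 4, 7, 8],
                   [9, 1, 2, 3],
                   [1, 1, 4, 0]])
    # max_pool(im) -> [[4, 8],
    #                  [9, 4]]
    return max_pool(im)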
def max_pool_multi(image, ns):
"""perform multiple levels of max pooling and unravel
to create a feature vector"""
# TODO: move this to a higher level
# image_gray = _grayscale(image)
if 1 in ns:
res = [image]
else:
res = []
for n in range(2, max(ns) + 1):
image = max_pool(image)
if n in ns:
res.append(image)
return np.hstack([np.ravel(y) for y in res])
def column_ex(gray):
"""experimental feature - something like the center of mass of
overlapping columns of the image"""
width = 2
# mul_mat = np.arange(y_size)[:, np.newaxis]
# for some reason, it works a lot better to not divide by the sum of the
# whole window but only the first column.
mul_mat = np.linspace(0, 1, gray.shape[0])[:, np.newaxis]
y_agg = np.array([(np.sum(gray[:, idx + width] * mul_mat) /
np.sum(gray[:, idx]))
for idx in range(gray.shape[1] - width)])
y_agg[~np.isfinite(y_agg)] = 0.0
res = np.hstack((y_agg, np.diff(y_agg)))
return res
def extract_pos(pos, im, border=255):
"""extract a position (tuple of start and end) from an image"""
# this is intended to have the correct logic to always return an image
# of the width of the position even if it is off the edge of the image
target_width = pos[1] - pos[0]
extract = im[:, np.maximum(pos[0], 0):pos[1]]
# print(cpos, extract.shape, im.shape)
if extract.shape[1] < target_width:
res = np.ones((im.shape[0], target_width, 3), dtype=np.ubyte) * border
if pos[0] < 0:
pr = (-pos[0], -pos[0] + extract.shape[1])
else:
pr = (0, extract.shape[1])
# print(pr, flush=True)
res[:, pr[0]:pr[1]] = extract
return res
else:
res = extract
return res
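# A quick sketch of extract_pos() edge handling: a window hanging off the left
# edge still comes back at the requested width, padded with the border value.
def _demo_extract_pos():
    im = np.zeros((4, 6, 3), dtype=np.ubyte)
    patch = extract_pos((-2, 3), im)  # two columns off the left edge
    # patch.shape == (4, 5, 3); columns 0-1 are border (255), 2-4 are image
    return patch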
|
bdzimmer/handwriting
|
handwriting/improc.py
|
Python
|
bsd-3-clause
| 7,094
|
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import logging
import os
import subprocess
import sys
_SRC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
sys.path.append(os.path.join(_SRC_ROOT, 'third_party', 'catapult', 'devil'))
from devil import base_error
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.utils import logging_common
sys.path.append(os.path.join(_SRC_ROOT, 'build', 'android'))
import devil_chromium
_SCRIPT_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'asan_device_setup.sh'))
@contextlib.contextmanager
def _LogDevicesOnFailure(msg):
try:
yield
except base_error.BaseError:
logging.exception(msg)
logging.error('Devices visible to adb:')
for entry in adb_wrapper.AdbWrapper.Devices(desired_state=None,
long_list=True):
logging.error(' %s: %s',
entry[0].GetDeviceSerial(),
' '.join(entry[1:]))
raise
@contextlib.contextmanager
def Asan(args):
env = os.environ.copy()
env['ADB'] = args.adb
try:
with _LogDevicesOnFailure('Failed to set up the device.'):
device = device_utils.DeviceUtils.HealthyDevices(
device_arg=args.device)[0]
disable_verity = device.build_version_sdk >= version_codes.MARSHMALLOW
if disable_verity:
device.EnableRoot()
# TODO(crbug.com/790202): Stop logging output after diagnosing
# issues on android-asan.
verity_output = device.adb.DisableVerity()
if verity_output:
logging.info('disable-verity output:')
for line in verity_output.splitlines():
logging.info(' %s', line)
device.Reboot()
# Call EnableRoot prior to asan_device_setup.sh to ensure it doesn't
# get tripped up by the root timeout.
device.EnableRoot()
setup_cmd = [_SCRIPT_PATH, '--lib', args.lib]
if args.device:
setup_cmd += ['--device', args.device]
subprocess.check_call(setup_cmd, env=env)
yield
finally:
with _LogDevicesOnFailure('Failed to tear down the device.'):
device.EnableRoot()
teardown_cmd = [_SCRIPT_PATH, '--revert']
if args.device:
teardown_cmd += ['--device', args.device]
subprocess.check_call(teardown_cmd, env=env)
if disable_verity:
# TODO(crbug.com/790202): Stop logging output after diagnosing
# issues on android-asan.
verity_output = device.adb.EnableVerity()
if verity_output:
logging.info('enable-verity output:')
for line in verity_output.splitlines():
logging.info(' %s', line)
device.Reboot()
def main(raw_args):
parser = argparse.ArgumentParser()
logging_common.AddLoggingArguments(parser)
parser.add_argument(
'--adb', type=os.path.realpath, required=True,
help='Path to adb binary.')
parser.add_argument(
'--device',
help='Device serial.')
parser.add_argument(
'--lib', type=os.path.realpath, required=True,
help='Path to asan library.')
parser.add_argument(
'command', nargs='*',
help='Command to run with ASAN installed.')
  args = parser.parse_args(raw_args)
# TODO(crbug.com/790202): Remove this after diagnosing issues
# with android-asan.
if not args.quiet:
args.verbose += 1
logging_common.InitializeLogging(args)
devil_chromium.Initialize(adb_path=args.adb)
with Asan(args):
if args.command:
return subprocess.call(args.command)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
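# Example invocation (paths and test binary are hypothetical):
#   vpython with_asan.py --adb /path/to/adb \
#       --lib /path/to/libclang_rt.asan-arm-android.so \
#       out/Default/bin/run_some_test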
|
endlessm/chromium-browser
|
tools/android/asan/third_party/with_asan.py
|
Python
|
bsd-3-clause
| 3,894
|
# Copyright 2013 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""A simple, direct connection to the vtgate proxy server, using gRPC.
"""
import logging
import re
from urlparse import urlparse
# Import main protobuf library first
# to work around import order issues.
import google.protobuf # pylint: disable=unused-import
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.interfaces.face import face
from vtproto import vtgate_pb2
from vtproto import vtgateservice_pb2
from vtdb import dbexceptions
from vtdb import proto3_encoding
from vtdb import vtdb_logger
from vtdb import vtgate_client
from vtdb import vtgate_cursor
from vtdb import vtgate_utils
_errno_pattern = re.compile(r'\(errno (\d+)\)', re.IGNORECASE)
_throttler_err_pattern = re.compile(
r'exceeded (.*) quota, rate limiting', re.IGNORECASE)
class GRPCVTGateConnection(vtgate_client.VTGateClient,
proto3_encoding.Proto3Connection):
"""A direct gRPC connection to the vtgate query service, using proto3.
"""
def __init__(self, addr, timeout,
root_certificates=None, private_key=None, certificate_chain=None,
**kwargs):
"""Creates a new GRPCVTGateConnection.
Args:
addr: address to connect to.
      timeout: connection timeout.
      root_certificates: PEM-encoded root certificates.
private_key: PEM-encoded private key.
certificate_chain: PEM-encoded certificate chain.
**kwargs: passed up.
"""
super(GRPCVTGateConnection, self).__init__(addr, timeout, **kwargs)
self.stub = None
self.root_certificates = root_certificates
self.private_key = private_key
self.certificate_chain = certificate_chain
self.logger_object = vtdb_logger.get_logger()
def dial(self):
if self.stub:
self.stub.close()
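    # prepend a scheme so urlparse populates hostname and port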
p = urlparse('http://' + self.addr)
if self.root_certificates or self.private_key or self.certificate_chain:
creds = implementations.ssl_channel_credentials(
self.root_certificates, self.private_key, self.certificate_chain)
channel = implementations.secure_channel(p.hostname, p.port, creds)
else:
channel = implementations.insecure_channel(p.hostname, p.port)
self.stub = vtgateservice_pb2.beta_create_Vitess_stub(channel)
def close(self):
"""close closes the server connection and frees up associated resources.
    The stub object is managed by the gRPC library; removing references
    to it will just close the channel.
"""
if self.session and self.session.in_transaction:
self.rollback()
self.stub = None
def is_closed(self):
return self.stub is None
def cursor(self, *pargs, **kwargs):
cursorclass = kwargs.pop('cursorclass', None) or vtgate_cursor.VTGateCursor
return cursorclass(self, *pargs, **kwargs)
def begin(self, effective_caller_id=None):
try:
request = self.begin_request(effective_caller_id)
response = self.stub.Begin(request, self.timeout)
self.update_session(response)
except (face.AbortionError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Begin')
def commit(self):
try:
request = self.commit_request()
self.stub.Commit(request, self.timeout)
except (face.AbortionError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Commit')
finally:
self.session = None
def rollback(self):
try:
request = self.rollback_request()
self.stub.Rollback(request, self.timeout)
except (face.AbortionError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Rollback')
finally:
self.session = None
@vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
def _execute(
self, sql, bind_variables, tablet_type, keyspace_name=None,
shards=None, keyspace_ids=None, keyranges=None,
entity_keyspace_id_map=None, entity_column_name=None,
not_in_transaction=False, effective_caller_id=None, **kwargs):
# FIXME(alainjobart): keyspace should be in routing_kwargs,
# as it's not used for v3.
try:
request, routing_kwargs, method_name = self.execute_request_and_name(
sql, bind_variables, tablet_type,
keyspace_name, shards, keyspace_ids, keyranges,
entity_column_name, entity_keyspace_id_map,
not_in_transaction, effective_caller_id)
method = getattr(self.stub, method_name)
response = method(request, self.timeout)
return self.process_execute_response(method_name, response)
except (face.AbortionError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables)
raise _convert_exception(
e, method_name,
sql=sql, keyspace=keyspace_name, tablet_type=tablet_type,
not_in_transaction=not_in_transaction,
**routing_kwargs)
@vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
def _execute_batch(
self, sql_list, bind_variables_list, keyspace_list, keyspace_ids_list,
shards_list, tablet_type, as_transaction, effective_caller_id=None,
**kwargs):
try:
request, method_name = self.execute_batch_request_and_name(
sql_list, bind_variables_list, keyspace_list,
keyspace_ids_list, shards_list,
tablet_type, as_transaction, effective_caller_id)
method = getattr(self.stub, method_name)
response = method(request, self.timeout)
return self.process_execute_batch_response(method_name, response)
except (face.AbortionError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables_list)
raise _convert_exception(
e, method_name,
sqls=sql_list, tablet_type=tablet_type,
as_transaction=as_transaction)
@vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
def _stream_execute(
self, sql, bind_variables, tablet_type, keyspace_name=None,
shards=None, keyspace_ids=None, keyranges=None,
effective_caller_id=None,
**kwargs):
try:
request, routing_kwargs, method_name = self.stream_execute_request_and_name(
sql, bind_variables, tablet_type,
keyspace_name,
shards,
keyspace_ids,
keyranges,
effective_caller_id)
method = getattr(self.stub, method_name)
it = method(request, self.timeout)
first_response = it.next()
except (face.AbortionError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables)
raise _convert_exception(
e, method_name,
sql=sql, keyspace=keyspace_name, tablet_type=tablet_type,
**routing_kwargs)
fields, convs = self.build_conversions(first_response.result.fields)
def row_generator():
try:
for response in it:
for row in response.result.rows:
yield tuple(proto3_encoding.make_row(row, convs))
except Exception:
logging.exception('gRPC low-level error')
raise
return row_generator(), fields
def get_srv_keyspace(self, name):
try:
request = vtgate_pb2.GetSrvKeyspaceRequest(
keyspace=name,
)
response = self.stub.GetSrvKeyspace(request, self.timeout)
return self.keyspace_from_response(name, response)
except (face.AbortionError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, keyspace=name)
def _convert_exception(exc, *args, **kwargs):
"""This parses the protocol exceptions to the api interface exceptions.
This also logs the exception and increments the appropriate error counters.
Args:
exc: raw protocol exception.
*args: additional args from the raising site.
**kwargs: additional keyword args from the raising site.
They will be converted into a single string, and added as an extra
arg to the exception.
Returns:
Api interface exceptions - dbexceptions with new args.
"""
kwargs_as_str = vtgate_utils.convert_exception_kwargs(kwargs)
exc.args += args
if kwargs_as_str:
    exc.args += (kwargs_as_str,)
new_args = (type(exc).__name__,) + exc.args
if isinstance(exc, vtgate_utils.VitessError):
new_exc = exc.convert_to_dbexception(new_args)
elif isinstance(exc, face.ExpirationError):
# face.ExpirationError is returned by the gRPC library when
# a request times out. Note it is a subclass of face.AbortionError
# so we have to test for it before.
new_exc = dbexceptions.TimeoutError(new_args)
elif isinstance(exc, face.AbortionError):
# face.AbortionError is the toplevel error returned by gRPC for any
# RPC that finishes earlier than expected.
msg = exc.details
if exc.code == interfaces.StatusCode.UNAVAILABLE:
if _throttler_err_pattern.search(msg):
return dbexceptions.ThrottledError(new_args)
else:
return dbexceptions.TransientError(new_args)
elif exc.code == interfaces.StatusCode.ALREADY_EXISTS:
new_exc = _prune_integrity_error(msg, new_args)
else:
# Unhandled RPC application error
new_exc = dbexceptions.DatabaseError(new_args + (msg,))
else:
new_exc = exc
vtgate_utils.log_exception(
new_exc,
keyspace=kwargs.get('keyspace'), tablet_type=kwargs.get('tablet_type'))
return new_exc
def _prune_integrity_error(msg, exc_args):
"""Prunes an integrity error message and returns an IntegrityError."""
parts = _errno_pattern.split(msg)
pruned_msg = msg[:msg.find(parts[2])]
exc_args = (pruned_msg,) + tuple(exc_args[1:])
return dbexceptions.IntegrityError(exc_args)
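# A small worked example of _prune_integrity_error(); the message shape is
# hypothetical but matches _errno_pattern. Everything after the
# "(errno NNN)" marker is dropped from the pruned message.
def _demo_prune_integrity_error():
  msg = "duplicate entry 'x' (errno 1062) (sqlstate 23000) during query"
  # _errno_pattern.split(msg) -> [prefix, '1062', suffix]; the pruned message
  # is msg up to the suffix: "duplicate entry 'x' (errno 1062)"
  return _prune_integrity_error(msg, (msg,))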
vtgate_client.register_conn_class('grpc', GRPCVTGateConnection)
|
AndyDiamondstein/vitess
|
py/vtdb/grpc_vtgate_client.py
|
Python
|
bsd-3-clause
| 9,838
|
# -*- coding: utf-8 -*-
import json
import logging
import os
import pickle
import sys
import uuid
from unittest.mock import Mock, PropertyMock, patch, MagicMock, ANY
import celery
import yaml
from billiard.einfo import ExceptionInfo
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from eventkit_cloud.celery import TaskPriority, app
from eventkit_cloud.jobs.models import DatamodelPreset, DataProvider, Job, DataProviderType
from eventkit_cloud.tasks.enumerations import TaskState
from eventkit_cloud.tasks.export_tasks import (
ExportTask,
export_task_error_handler,
finalize_run_task,
kml_export_task,
mapproxy_export_task,
geopackage_export_task,
shp_export_task,
arcgis_feature_service_export_task,
pick_up_run_task,
cancel_export_provider_task,
kill_task,
geotiff_export_task,
nitf_export_task,
bounds_export_task,
parse_result,
finalize_export_provider_task,
FormatTask,
wait_for_providers_task,
create_zip_task,
pbf_export_task,
sqlite_export_task,
gpx_export_task,
mbtiles_export_task,
wfs_export_task,
vector_file_export_task,
raster_file_export_task,
osm_data_collection_pipeline,
reprojection_task,
ogcapi_process_export_task,
get_ogcapi_data,
)
from eventkit_cloud.tasks.export_tasks import zip_files
from eventkit_cloud.tasks.helpers import default_format_time
from eventkit_cloud.tasks.models import (
DataProviderTaskRecord,
ExportRun,
ExportTaskRecord,
FileProducingTaskResult,
RunZipFile,
)
from eventkit_cloud.tasks.task_base import LockingTask
logger = logging.getLogger(__name__)
test_cert_info = """
cert_info:
cert_path: '/path/to/fake/cert'
cert_pass_var: 'fakepass'
"""
expected_cert_info = {"cert_path": "/path/to/fake/cert", "cert_pass_var": "fakepass"}
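# The fixture above parses to {"cert_info": expected_cert_info}, i.e.
# yaml.safe_load(test_cert_info)["cert_info"] == expected_cert_info.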
class TestLockingTask(TestCase):
def test_locking_task(self):
task_id = "0123"
retries = False
task_name = "lock_test_task"
expected_lock_key = f"TaskLock_{task_name}_{task_id}_{retries}"
expected_result = "result"
# Create a test task...
@app.task(base=LockingTask)
def lock_test_task():
return expected_result
# ...mock the cache...
mock_cache = MagicMock()
mock_cache.add.side_effect = ["A Lock", None, None, None, None]
# ...create two separate test tasks...
lock_task = lock_task2 = lock_test_task
lock_task.cache = lock_task2.cache = mock_cache
# ..create a mock request...
mock_request = Mock(task_name=task_name, id=task_id, retries=False)
mock_request_stack = Mock()
mock_request_stack.top = mock_request
mock_push_request = Mock()
# ...with duplicate requests...
lock_task.request_stack = lock_task2.request_stack = mock_request_stack
lock_task.push_request = lock_task2.push_request = mock_push_request
# ...call first task ensure it returns...
result = lock_task.__call__()
self.assertEqual(result, expected_result)
mock_cache.add.assert_called_with(expected_lock_key, task_id, lock_task.lock_expiration)
# ...call a second task with duplicate id, ensure nothing returns.
result = lock_task2.__call__()
self.assertIsNone(result)
mock_cache.add.assert_called_with(expected_lock_key, task_id, lock_task.lock_expiration)
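# A minimal sketch of the behavior verified above (not the real
# eventkit_cloud.tasks.task_base.LockingTask): cache.add() is atomic and
# returns a truthy value only when the key is new, so a second call with the
# same task name/id/retries combination silently skips the task body.
def _locking_call_sketch(cache, task_name, task_id, retries, func, lock_expiration=60):
    lock_key = f"TaskLock_{task_name}_{task_id}_{retries}"
    if cache.add(lock_key, task_id, lock_expiration):
        return func()
    return None  # lock held by another invocation: skip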
class ExportTaskBase(TestCase):
fixtures = ("osm_provider.json", "datamodel_presets.json")
def setUp(self):
self.maxDiff = None
self.path = os.path.dirname(os.path.realpath(__file__))
self.group, created = Group.objects.get_or_create(name="TestDefault")
with patch("eventkit_cloud.jobs.signals.Group") as mock_group:
mock_group.objects.get.return_value = self.group
self.user = User.objects.create(username="demo", email="demo@demo.com", password="demo")
bbox = Polygon.from_bbox((-10.85, 6.25, -10.62, 6.40))
tags = DatamodelPreset.objects.get(name="hdm").json_tags
self.assertEqual(259, len(tags))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(
name="TestJob", description="Test description", user=self.user, the_geom=the_geom, json_tags=tags
)
self.job.feature_save = True
self.job.feature_pub = True
self.job.save()
self.run = ExportRun.objects.create(job=self.job, user=self.user)
self.provider = DataProvider.objects.first()
class TestExportTasks(ExportTaskBase):
stage_dir = "/stage"
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_run_shp_export_task(self, mock_request, mock_convert, mock_get_export_filepath):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
mock_convert.return_value = expected_output_path
previous_task_result = {"source": expected_output_path}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=shp_export_task.name
)
shp_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid))
result = shp_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
)
mock_convert.assert_called_once_with(
driver="ESRI Shapefile",
input_file=expected_output_path,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
boundary=None,
projection=4326,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(expected_output_path, result["source"])
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.generate_qgs_style")
@patch("eventkit_cloud.tasks.export_tasks.convert_qgis_gpkg_to_kml")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_run_kml_export_task(
self, mock_request, mock_convert, mock_qgis_convert, mock_generate_qgs_style, mock_get_export_filepath
):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
mock_generate_qgs_style.return_value = qgs_file = "/style.qgs"
mock_convert.return_value = mock_qgis_convert.return_value = expected_output_path
previous_task_result = {"source": expected_output_path}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=kml_export_task.name
)
kml_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid))
result = kml_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
)
try:
import qgis # noqa
mock_qgis_convert.assert_called_once_with(qgs_file, expected_output_path, stage_dir=self.stage_dir)
except ImportError:
mock_convert.assert_called_once_with(
driver="libkml",
input_file=expected_output_path,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
boundary=None,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(expected_output_path, result["source"])
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_run_sqlite_export_task(self, mock_request, mock_convert, mock_get_export_filepath):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
mock_convert.return_value = expected_output_path
previous_task_result = {"source": expected_output_path}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=sqlite_export_task.name
)
sqlite_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid))
result = sqlite_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
)
mock_convert.assert_called_once_with(
driver="SQLite",
input_file=expected_output_path,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
boundary=None,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(expected_output_path, result["source"])
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.download_concurrently")
@patch("eventkit_cloud.tasks.helpers.download_data")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("eventkit_cloud.tasks.export_tasks.geopackage")
@patch("celery.app.task.Task.request")
def test_run_wfs_export_task(
self,
mock_request,
mock_gpkg,
mock_convert,
mock_download_data,
mock_download_concurrently,
mock_get_export_filepath,
):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
expected_provider_slug = "wfs-service"
self.provider.export_provider_type = DataProviderType.objects.get(type_name="wfs")
self.provider.slug = expected_provider_slug
self.provider.config = None
self.provider.save()
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
layer = "foo"
service_url = "https://abc.gov/WFSserver/"
expected_input_path = [
os.path.join(self.stage_dir, "chunk0.json"),
os.path.join(self.stage_dir, "chunk1.json"),
os.path.join(self.stage_dir, "chunk2.json"),
os.path.join(self.stage_dir, "chunk3.json"),
]
mock_convert.return_value = expected_output_path
mock_download_data.return_value = expected_input_path
previous_task_result = {"source": expected_output_path}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=wfs_export_task.name
)
wfs_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid))
mock_gpkg.check_content_exists.return_value = True
result = wfs_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=service_url,
layer=layer,
bbox=[1, 2, 3, 4],
)
mock_convert.assert_called_once_with(
driver="gpkg",
input_file=expected_input_path,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=projection,
boundary=[1, 2, 3, 4],
layer_name=expected_provider_slug,
access_mode="append",
distinct_field=None,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(expected_output_path, result["source"])
mock_gpkg.check_content_exists.assert_called_once_with(expected_output_path)
result_b = wfs_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=f"{service_url}/",
bbox=[1, 2, 3, 4],
)
self.assertEqual(expected_output_path, result_b["result"])
self.assertEqual(expected_output_path, result_b["source"])
url_1 = "https://abc.gov/wfs/services/x"
url_2 = "https://abc.gov/wfs/services/y"
layer_1 = "spam"
layer_2 = "ham"
config = f"""
vector_layers:
- name: '{layer_1}'
url: '{url_1}'
- name: '{layer_2}'
url: '{url_2}'
"""
expected_path_1 = f"{layer_1}.gpkg"
expected_path_2 = f"{layer_2}.gpkg"
expected_url_1 = (
f"{url_1}?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME={layer_1}"
f"&SRSNAME=EPSG:{projection}&BBOX=BBOX_PLACEHOLDER"
)
expected_url_2 = (
f"{url_2}?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME={layer_2}"
f"&SRSNAME=EPSG:{projection}&BBOX=BBOX_PLACEHOLDER"
)
expected_layers = {
layer_1: {
"task_uid": str(saved_export_task.uid),
"url": expected_url_1,
"path": expected_path_1,
"base_path": f"{self.stage_dir.rstrip('/')}/{layer_1}-{projection}",
"bbox": [1, 2, 3, 4],
"cert_info": None,
"layer_name": layer_1,
"projection": projection,
},
layer_2: {
"task_uid": str(saved_export_task.uid),
"url": expected_url_2,
"path": expected_path_2,
"base_path": f"{self.stage_dir.rstrip('/')}/{layer_2}-{projection}",
"bbox": [1, 2, 3, 4],
"cert_info": None,
"layer_name": layer_2,
"projection": projection,
},
}
mock_download_concurrently.return_value = expected_layers
mock_convert.reset_mock()
mock_get_export_filepath.side_effect = [expected_output_path, expected_path_1, expected_path_2]
# test with multiple layers
result_c = wfs_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=service_url,
layer=layer,
config=config,
bbox=[1, 2, 3, 4],
)
_, args, _ = mock_download_concurrently.mock_calls[0]
self.assertEqual(list(args[0]), list(expected_layers.values()))
self.assertEqual(mock_convert.call_count, 2)
mock_convert.assert_any_call(
driver="gpkg",
input_file=expected_path_1,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
boundary=[1, 2, 3, 4],
access_mode="append",
layer_name=layer_1,
)
mock_convert.assert_any_call(
driver="gpkg",
input_file=expected_path_2,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
boundary=[1, 2, 3, 4],
access_mode="append",
layer_name=layer_2,
)
self.assertEqual(expected_output_path, result_c["result"])
self.assertEqual(expected_output_path, result_c["source"])
# test downloads with certs
mock_download_data.reset_mock()
mock_get_export_filepath.side_effect = [expected_output_path, expected_path_1, expected_path_2]
wfs_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=service_url,
layer=layer,
bbox=[1, 2, 3, 4],
)
mock_download_data.assert_called_with(
str(saved_export_task.uid), ANY, expected_input_path[3], cert_info=None, task_points=400
)
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.utils.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_mbtiles_export_task(self, mock_request, mock_convert, mock_get_export_filepath):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
input_projection = 4326
output_projection = 3857
driver = "MBTiles"
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
mock_convert.return_value = expected_output_path
sample_input = "example.gpkg"
previous_task_result = {"source": sample_input}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=mbtiles_export_task.name
)
mbtiles_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid))
result = mbtiles_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=output_projection,
)
mock_convert.assert_called_once_with(
driver=driver,
input_file=sample_input,
output_file=expected_output_path,
src_srs=input_projection,
task_uid=str(saved_export_task.uid),
projection=output_projection,
boundary=None,
use_translate=True,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(sample_input, result["source"])
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.os.rename")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_run_gpkg_export_task(self, mock_request, mock_convert, mock_rename, mock_get_export_filepath):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.gpkg"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
mock_rename.return_value = expected_output_path
previous_task_result = {"source": expected_output_path}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=geopackage_export_task.name
)
result = geopackage_export_task(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
)
mock_rename.assert_called_once_with(expected_output_path, expected_output_path)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(expected_output_path, result["source"])
example_input_file = "test.tif"
previous_task_result = {"source": example_input_file}
mock_convert.return_value = expected_output_path
result = geopackage_export_task(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
)
mock_convert.assert_called_once_with(
driver="gpkg",
input_file=example_input_file,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
boundary=None,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(example_input_file, result["source"])
@patch("eventkit_cloud.tasks.export_tasks.sqlite3.connect")
@patch("eventkit_cloud.tasks.export_tasks.cancel_export_provider_task.run")
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.get_export_task_record")
@patch("eventkit_cloud.tasks.export_tasks.os")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils")
@patch("eventkit_cloud.tasks.export_tasks.update_progress")
@patch("eventkit_cloud.tasks.export_tasks.geopackage")
@patch("eventkit_cloud.tasks.export_tasks.FeatureSelection")
@patch("eventkit_cloud.tasks.export_tasks.pbf")
@patch("eventkit_cloud.tasks.export_tasks.overpass")
def test_osm_data_collection_pipeline(
self,
mock_overpass,
mock_pbf,
mock_feature_selection,
mock_geopackage,
mock_update_progress,
mock_gdalutils,
mock_os,
mock_get_export_task_record,
mock_get_export_filepath,
mock_cancel_provider_task,
mock_connect,
):
example_export_task_record_uid = "1234"
example_bbox = [-1, -1, 1, 1]
mock_get_export_filepath.return_value = example_gpkg = "/path/to/file.gpkg"
mock_geopackage.Geopackage.return_value = Mock(results=[Mock(parts=[example_gpkg])])
# Test with using overpass
example_overpass_query = "some_query; out;"
example_config = {"overpass_query": example_overpass_query}
osm_data_collection_pipeline(
example_export_task_record_uid, self.stage_dir, bbox=example_bbox, config=yaml.dump(example_config)
)
mock_connect.assert_called_once()
mock_overpass.Overpass.assert_called_once()
mock_pbf.OSMToPBF.assert_called_once()
mock_feature_selection.example.assert_called_once()
mock_cancel_provider_task.assert_not_called()
# Test canceling the provider task on an empty geopackage.
mock_geopackage.Geopackage().run.return_value = None
osm_data_collection_pipeline(
example_export_task_record_uid, self.stage_dir, bbox=example_bbox, config=yaml.dump(example_config)
)
mock_cancel_provider_task.assert_called_once()
mock_overpass.reset_mock()
mock_pbf.reset_mock()
mock_feature_selection.reset_mock()
mock_geopackage.reset_mock()
# Test with using pbf_file
example_pbf_file = "test.pbf"
example_config = {"pbf_file": example_pbf_file}
osm_data_collection_pipeline(
example_export_task_record_uid, self.stage_dir, bbox=example_bbox, config=yaml.dump(example_config)
)
mock_overpass.Overpass.assert_not_called()
mock_pbf.OSMToPBF.assert_not_called()
mock_feature_selection.assert_not_called()
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.get_creation_options")
@patch("eventkit_cloud.tasks.export_tasks.get_export_task_record")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils")
def test_geotiff_export_task(
self, mock_gdalutils, mock_get_export_task_record, mock_get_creation_options, mock_get_export_filepath
):
        # TODO: This can be set up as a way to test the other ExportTasks without all the boilerplate.
ExportTask.__call__ = lambda *args, **kwargs: celery.Task.__call__(*args, **kwargs)
example_geotiff = "example.tif"
example_result = {"source": example_geotiff}
task_uid = "1234"
warp_params = {"warp": "params"}
translate_params = {"translate": "params"}
mock_get_creation_options.return_value = warp_params, translate_params
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
geotiff_export_task(result=example_result, task_uid=task_uid, stage_dir=self.stage_dir, job_name="job")
mock_gdalutils.convert.return_value = expected_outfile
mock_gdalutils.convert.assert_called_once_with(
boundary=None,
driver="gtiff",
input_file=f"GTIFF_RAW:{example_geotiff}",
output_file=expected_outfile,
task_uid=task_uid,
warp_params=warp_params,
translate_params=translate_params,
)
mock_gdalutils.reset_mock()
example_result = {"source": example_geotiff, "selection": "selection"}
mock_gdalutils.convert.return_value = expected_outfile
geotiff_export_task(result=example_result, task_uid=task_uid, stage_dir=self.stage_dir, job_name="job")
mock_gdalutils.convert.assert_called_once_with(
boundary="selection",
driver="gtiff",
input_file=f"GTIFF_RAW:{example_geotiff}",
output_file=expected_outfile,
task_uid=task_uid,
warp_params=warp_params,
translate_params=translate_params,
)
mock_gdalutils.reset_mock()
example_result = {"gtiff": expected_outfile}
geotiff_export_task(result=example_result, task_uid=task_uid, stage_dir=self.stage_dir, job_name="job")
mock_gdalutils.assert_not_called()
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.get_export_task_record")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils")
def test_nitf_export_task(self, mock_gdalutils, mock_get_export_task_record, mock_get_export_filepath):
ExportTask.__call__ = lambda *args, **kwargs: celery.Task.__call__(*args, **kwargs)
example_nitf = "example.nitf"
example_result = {"source": example_nitf}
task_uid = "1234"
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
nitf_export_task(result=example_result, task_uid=task_uid, stage_dir=self.stage_dir, job_name="job")
mock_gdalutils.convert.return_value = expected_outfile
mock_gdalutils.convert.assert_called_once_with(
creation_options=["ICORDS=G"],
driver="nitf",
input_file=example_nitf,
output_file=expected_outfile,
task_uid=task_uid,
)
mock_gdalutils.reset_mock()
nitf_export_task(result=example_result, task_uid=task_uid, stage_dir=self.stage_dir, job_name="job")
mock_gdalutils.convert.assert_called_once_with(
creation_options=["ICORDS=G"],
driver="nitf",
input_file=example_nitf,
output_file=expected_outfile,
task_uid=task_uid,
)
def test_pbf_export_task(self):
        # TODO: This can be set up as a way to test the other ExportTasks without all the boilerplate.
ExportTask.__call__ = lambda *args, **kwargs: celery.Task.__call__(*args, **kwargs)
example_pbf = "example.pbf"
example_result = {"pbf": example_pbf}
expected_result = {"file_extension": "pbf", "driver": "OSM", "pbf": example_pbf, "result": example_pbf}
returned_result = pbf_export_task(example_result)
        self.assertEqual(expected_result, returned_result)
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.get_export_task_record")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_sqlite_export_task(
self, mock_request, mock_convert, mock_get_export_task_record, mock_get_export_filepath
):
ExportTask.__call__ = lambda *args, **kwargs: celery.Task.__call__(*args, **kwargs)
expected_provider_slug = "osm-generic"
expected_event = "event"
expected_label = "label"
mock_get_export_task_record.return_value = Mock(
export_provider_task=Mock(
run=Mock(job=Mock(event=expected_event)),
provider=Mock(slug=expected_provider_slug, data_type="vector", label=expected_label),
)
)
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
mock_convert.return_value = expected_output_path
previous_task_result = {"source": expected_output_path}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=sqlite_export_task.name
)
sqlite_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid))
result = sqlite_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
)
mock_convert.assert_called_once_with(
driver="SQLite",
input_file=expected_output_path,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
boundary=None,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(expected_output_path, result["source"])
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.get_export_task_record")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils")
def test_gpx_export_task(self, mock_gdalutils, mock_get_export_task_record, mock_get_export_filepath):
        # TODO: This can be set up as a way to test the other ExportTasks without all the boilerplate.
ExportTask.__call__ = lambda *args, **kwargs: celery.Task.__call__(*args, **kwargs)
expected_provider_slug = "osm-generic"
expected_event = "event"
expected_label = "label"
mock_get_export_task_record.return_value = Mock(
export_provider_task=Mock(
run=Mock(job=Mock(event=expected_event)),
provider=Mock(slug=expected_provider_slug, data_type="vector", label=expected_label),
)
)
example_source = "example.pbf"
example_geojson = "example.geojson"
task_uid = "1234"
example_result = {"pbf": example_source, "selection": example_geojson}
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
mock_gdalutils.convert.return_value = expected_output_path
expected_result = {
"pbf": example_source,
"file_extension": "gpx",
"driver": "GPX",
"result": expected_output_path,
"gpx": expected_output_path,
"selection": example_geojson,
}
returned_result = gpx_export_task(
result=example_result, task_uid=task_uid, stage_dir=self.stage_dir, job_name="job"
)
mock_gdalutils.convert.assert_called_once_with(
input_file=example_source,
output_file=expected_output_path,
driver="GPX",
dataset_creation_options=["GPX_USE_EXTENSIONS=YES"],
creation_options=["-explodecollections"],
boundary=example_geojson,
)
self.assertEqual(returned_result, expected_result)
@patch("eventkit_cloud.tasks.export_tasks.make_dirs")
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.geopackage")
@patch("eventkit_cloud.tasks.export_tasks.download_concurrently")
@patch("eventkit_cloud.tasks.helpers.download_feature_data")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_run_arcgis_feature_service_export_task(
self,
mock_request,
mock_convert,
mock_download_feature_data,
mock_download_concurrently,
mock_geopackage,
mock_get_export_filepath,
mock_makedirs,
):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
expected_provider_slug = "arcgis-feature-service"
self.provider.export_provider_type = DataProviderType.objects.get(type_name="arcgis-feature")
self.provider.slug = expected_provider_slug
self.provider.config = None
self.provider.save()
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
expected_esrijson = [
os.path.join(self.stage_dir, "chunk0.json"),
os.path.join(self.stage_dir, "chunk1.json"),
os.path.join(self.stage_dir, "chunk2.json"),
os.path.join(self.stage_dir, "chunk3.json"),
]
service_url = "https://abc.gov/arcgis/services/x"
bbox = [1, 2, 3, 4]
query_string = "query?where=objectid=objectid&outfields=*&f=json&geometry=BBOX_PLACEHOLDER"
expected_input_url = (
"https://abc.gov/arcgis/services/x/query?where=objectid=objectid&"
"outfields=*&f=json&geometry=2.0%2C%202.0%2C%203.0%2C%203.0"
)
mock_convert.return_value = expected_output_path
mock_download_feature_data.side_effect = expected_esrijson
previous_task_result = {"source": expected_input_url}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task,
status=TaskState.PENDING.value,
name=arcgis_feature_service_export_task.name,
)
mock_geopackage.check_content_exists.return_value = True
# test without trailing slash
result_a = arcgis_feature_service_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=service_url,
bbox=bbox,
)
mock_download_feature_data.assert_called_with(
str(saved_export_task.uid), expected_input_url, ANY, cert_info=None, task_points=400
)
mock_convert.assert_called_once_with(
driver="gpkg",
input_file=expected_esrijson,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
layer_name=expected_provider_slug,
boundary=bbox,
access_mode="append",
distinct_field=None,
)
self.assertEqual(expected_output_path, result_a["result"])
self.assertEqual(expected_output_path, result_a["source"])
mock_download_feature_data.reset_mock(return_value=True, side_effect=True)
# test with trailing slash
result_b = arcgis_feature_service_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=f"{service_url}/",
bbox=bbox,
)
self.assertEqual(expected_output_path, result_b["result"])
self.assertEqual(expected_output_path, result_b["source"])
url_1 = "https://abc.gov/arcgis/services/x"
url_2 = "https://abc.gov/arcgis/services/y"
layer_name_1 = "foo"
layer_name_2 = "bar"
expected_field = "baz"
config = f"""
vector_layers:
- name: '{layer_name_1}'
url: '{url_1}'
- name: '{layer_name_2}'
url: '{url_2}'
distinct_field: '{expected_field}'
"""
expected_path_1 = f"{layer_name_1}.gpkg"
expected_path_2 = f"{layer_name_2}.gpkg"
expected_url_1 = f"{url_1}/{query_string}"
expected_url_2 = f"{url_2}/{query_string}"
expected_layers = {
layer_name_1: {
"task_uid": str(saved_export_task.uid),
"url": expected_url_1,
"path": expected_path_1,
"base_path": f"{self.stage_dir.rstrip('/')}/{layer_name_1}-{projection}",
"bbox": [1, 2, 3, 4],
"cert_info": None,
"projection": projection,
"layer_name": layer_name_1,
"distinct_field": None,
},
layer_name_2: {
"task_uid": str(saved_export_task.uid),
"url": expected_url_2,
"path": expected_path_2,
"base_path": f"{self.stage_dir.rstrip('/')}/{layer_name_2}-{projection}",
"bbox": [1, 2, 3, 4],
"cert_info": None,
"projection": projection,
"layer_name": layer_name_2,
"distinct_field": expected_field,
},
}
mock_download_concurrently.return_value = expected_layers
mock_convert.reset_mock()
mock_download_feature_data.reset_mock()
mock_get_export_filepath.side_effect = [expected_output_path, expected_path_1, expected_path_2]
# test with multiple layers
result_c = arcgis_feature_service_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=f"{service_url}/",
bbox=bbox,
config=config,
)
_, args, _ = mock_download_concurrently.mock_calls[0]
self.assertEqual(list(args[0]), list(expected_layers.values()))
self.assertEqual(mock_convert.call_count, 2)
mock_convert.assert_any_call(
driver="gpkg",
input_file=expected_path_1,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
boundary=bbox,
access_mode="append",
layer_name=layer_name_1,
)
mock_convert.assert_any_call(
driver="gpkg",
input_file=expected_path_2,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=4326,
boundary=bbox,
access_mode="append",
layer_name=layer_name_2,
)
self.assertEqual(expected_output_path, result_c["result"])
self.assertEqual(expected_output_path, result_c["source"])
# test downloads with certs
mock_download_feature_data.reset_mock()
mock_get_export_filepath.side_effect = [expected_output_path, expected_path_1, expected_path_2]
arcgis_feature_service_export_task.run(
run_uid=123,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir="dir",
job_name="job",
projection=projection,
service_url=url_1,
bbox=bbox,
)
mock_download_feature_data.assert_called_with(
str(saved_export_task.uid), expected_input_url, "dir/chunk3.json", cert_info=None, task_points=400
)
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("celery.app.task.Task.request")
@patch("eventkit_cloud.utils.mapproxy.MapproxyGeopackage")
def test_run_external_raster_service_export_task(self, mock_service, mock_request, mock_get_export_filepath):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
service_to_gpkg = mock_service.return_value
job_name = self.job.name.lower()
service_to_gpkg.convert.return_value = expected_output_path = os.path.join(self.stage_dir, f"{job_name}.gpkg")
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=mapproxy_export_task.name
)
mapproxy_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid))
result = mapproxy_export_task.run(
run_uid=self.run.uid, task_uid=str(saved_export_task.uid), stage_dir=self.stage_dir, job_name=job_name
)
service_to_gpkg.convert.assert_called_once()
self.assertEqual(expected_output_path, result["result"])
# test the tasks update_task_state method
run_task = ExportTaskRecord.objects.get(celery_uid=celery_uid)
self.assertIsNotNone(run_task)
self.assertEqual(TaskState.RUNNING.value, run_task.status)
service_to_gpkg.convert.side_effect = Exception("Task Failed")
with self.assertRaises(Exception):
mapproxy_export_task.run(
run_uid=self.run.uid, task_uid=str(saved_export_task.uid), stage_dir=self.stage_dir, job_name=job_name
)
def test_task_on_failure(self):
celery_uid = str(uuid.uuid4())
# assume task is running
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, name="Shapefile Export", provider=self.provider
)
test_export_task_record = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task,
celery_uid=celery_uid,
status=TaskState.RUNNING.value,
name=shp_export_task.name,
)
try:
raise ValueError("some unexpected error")
except ValueError as e:
exc = e
exc_info = sys.exc_info()
einfo = ExceptionInfo(exc_info=exc_info)
shp_export_task.task_failure(
exc, task_id=test_export_task_record.uid, einfo=einfo, args={}, kwargs={"run_uid": str(self.run.uid)}
)
task = ExportTaskRecord.objects.get(celery_uid=celery_uid)
self.assertIsNotNone(task)
exception = task.exceptions.all()[0]
exc_info = pickle.loads(exception.exception.encode()).exc_info
error_type, msg = exc_info[0], exc_info[1]
self.assertEqual(error_type, ValueError)
self.assertEqual("some unexpected error", str(msg))
@patch("eventkit_cloud.tasks.export_tasks.get_data_package_manifest")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.retry")
@patch("shutil.copy")
@patch("os.remove")
@patch("eventkit_cloud.tasks.export_tasks.ZipFile")
@patch("os.walk")
@patch("os.path.getsize")
def test_zipfile_task(
self, os_path_getsize, mock_os_walk, mock_zipfile, remove, copy, mock_retry, mock_get_data_package_manifest
):
os_path_getsize.return_value = 20
class MockZipFile:
def __init__(self):
self.files = {}
def __iter__(self):
return iter(self.files)
def write(self, filename, **kw):
arcname = kw.get("arcname", filename)
self.files[arcname] = filename
def __exit__(self, *args, **kw):
pass
def __enter__(self, *args, **kw):
return self
def testzip(self):
return None
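        # MockZipFile stands in for zipfile.ZipFile: write() records each
        # arcname -> filename pair so the test can assert exactly which files
        # zip_files() archived.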
expected_archived_files = {
"MANIFEST/manifest.xml": "MANIFEST/manifest.xml",
"data/osm/file1.txt": "osm/file1.txt",
"data/osm/file2.txt": "osm/file2.txt",
}
run_uid = str(self.run.uid)
self.run.job.include_zipfile = True
self.run.job.event = "test"
self.run.job.save()
run_zip_file = RunZipFile.objects.create(run=self.run)
zipfile = MockZipFile()
mock_zipfile.return_value = zipfile
provider_slug = "osm"
zipfile_path = os.path.join(self.stage_dir, "{0}".format(run_uid), provider_slug, "test.gpkg")
expected_manifest_file = os.path.join("MANIFEST", "manifest.xml")
mock_get_data_package_manifest.return_value = expected_manifest_file
files = {
"{0}/file1.txt".format(provider_slug): "data/{0}/file1.txt".format(provider_slug),
"{0}/file2.txt".format(provider_slug): "data/{0}/file2.txt".format(provider_slug),
}
mock_os_walk.return_value = [
(
os.path.join(self.stage_dir, run_uid, provider_slug),
None,
["test.gpkg", "test.om5", "test.osm"], # om5 and osm should get filtered out
)
]
result = zip_files(files=files, run_zip_file_uid=run_zip_file.uid, file_path=zipfile_path)
self.assertEqual(zipfile.files, expected_archived_files)
self.assertEqual(result, zipfile_path)
mock_get_data_package_manifest.assert_called_once()
        zipfile.testzip = Mock(side_effect=Exception("Bad Zip"))
with self.assertRaises(Exception):
zip_files(files=files, file_path=zipfile_path)
@patch("celery.app.task.Task.request")
@patch("eventkit_cloud.tasks.export_tasks.geopackage")
def test_run_bounds_export_task(self, mock_geopackage, mock_request):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
provider_slug = "provider_slug"
mock_geopackage.add_geojson_to_geopackage.return_value = os.path.join(
self.stage_dir, "{}_bounds.gpkg".format(provider_slug)
)
expected_output_path = os.path.join(self.stage_dir, "{}_bounds.gpkg".format(provider_slug))
export_provider_task = DataProviderTaskRecord.objects.create(run=self.run, provider=self.provider)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=bounds_export_task.name
)
bounds_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid))
result = bounds_export_task.run(
run_uid=self.run.uid, task_uid=str(saved_export_task.uid), stage_dir=self.stage_dir, provider_slug=job_name
)
self.assertEqual(expected_output_path, result["result"])
# test the tasks update_task_state method
run_task = ExportTaskRecord.objects.get(celery_uid=celery_uid)
self.assertIsNotNone(run_task)
self.assertEqual(TaskState.RUNNING.value, run_task.status)
@override_settings(CELERY_GROUP_NAME="test")
@patch("eventkit_cloud.tasks.task_factory.TaskFactory")
@patch("eventkit_cloud.tasks.export_tasks.ExportRun")
@patch("eventkit_cloud.tasks.export_tasks.socket")
def test_pickup_run_task(self, socket, mock_export_run, task_factory):
mock_run = MagicMock()
mock_run.uid = self.run.uid
mock_run.status = TaskState.SUBMITTED.value
# This would normally return providers.
mock_run.data_provider_task_records.exclude.return_value = True
mock_export_run.objects.get.return_value = mock_run
socket.gethostname.return_value = "test"
self.assertEqual("Pickup Run", pick_up_run_task.name)
pick_up_run_task.run(run_uid=mock_run.uid, user_details={"username": "test_pickup_run_task"})
task_factory.assert_called_once()
expected_user_details = {"username": "test_pickup_run_task"}
task_factory.return_value.parse_tasks.assert_called_once_with(
run_uid=mock_run.uid,
user_details=expected_user_details,
worker="test",
run_zip_file_slug_sets=None,
session_token=None,
queue_group="test",
)
mock_run.download_data.assert_called_once()
@patch("eventkit_cloud.tasks.export_tasks.logger")
@patch("shutil.rmtree")
@patch("os.path.isdir")
def test_finalize_run_task_after_return(self, isdir, rmtree, logger):
celery_uid = str(uuid.uuid4())
run_uid = self.run.uid
isdir.return_value = True
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, name="Shapefile Export", provider=self.provider
)
ExportTaskRecord.objects.create(
export_provider_task=export_provider_task,
celery_uid=celery_uid,
status="SUCCESS",
name="Default Shapefile Export",
)
finalize_run_task.after_return("status", {"stage_dir": self.stage_dir}, run_uid, (), {}, "Exception Info")
isdir.assert_called_with(self.stage_dir)
rmtree.assert_called_with(self.stage_dir)
rmtree.side_effect = IOError()
finalize_run_task.after_return("status", {"stage_dir": self.stage_dir}, run_uid, (), {}, "Exception Info")
rmtree.assert_called_with(self.stage_dir)
self.assertRaises(IOError, rmtree)
logger.error.assert_called_once()
@patch("eventkit_cloud.tasks.export_tasks.EmailMultiAlternatives")
def test_finalize_run_task(self, email):
celery_uid = str(uuid.uuid4())
run_uid = self.run.uid
export_provider_task = DataProviderTaskRecord.objects.create(
status=TaskState.SUCCESS.value, run=self.run, name="Shapefile Export", provider=self.provider
)
ExportTaskRecord.objects.create(
export_provider_task=export_provider_task,
celery_uid=celery_uid,
status=TaskState.SUCCESS.value,
name="Default Shapefile Export",
)
self.assertEqual("Finalize Run Task", finalize_run_task.name)
finalize_run_task.run(run_uid=run_uid, stage_dir=self.stage_dir)
email().send.assert_called_once()
@patch("eventkit_cloud.tasks.export_tasks.RocketChat")
@patch("eventkit_cloud.tasks.export_tasks.EmailMultiAlternatives")
@patch("shutil.rmtree")
@patch("os.path.isdir")
def test_export_task_error_handler(self, isdir, rmtree, email, rocket_chat):
celery_uid = str(uuid.uuid4())
task_id = str(uuid.uuid4())
run_uid = self.run.uid
site_url = settings.SITE_URL
url = "{0}/status/{1}".format(site_url.rstrip("/"), self.run.job.uid)
os.environ["ROCKETCHAT_NOTIFICATIONS"] = json.dumps(
{"auth_token": "auth_token", "user_id": "user_id", "channels": ["channel"], "url": "http://api.example.dev"}
)
with self.settings(
ROCKETCHAT_NOTIFICATIONS={
"auth_token": "auth_token",
"user_id": "user_id",
"channels": ["channel"],
"url": "http://api.example.dev",
}
):
rocketchat_notifications = settings.ROCKETCHAT_NOTIFICATIONS
channel = rocketchat_notifications["channels"][0]
message = f"@here: A DataPack has failed during processing. {url}"
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, name="Shapefile Export", provider=self.provider
)
ExportTaskRecord.objects.create(
export_provider_task=export_provider_task,
uid=task_id,
celery_uid=celery_uid,
status=TaskState.FAILED.value,
name="Default Shapefile Export",
)
self.assertEqual("Export Task Error Handler", export_task_error_handler.name)
export_task_error_handler.run(run_uid=run_uid, task_id=task_id, stage_dir=self.stage_dir)
isdir.assert_any_call(self.stage_dir)
rmtree.assert_called_once_with(self.stage_dir)
email().send.assert_called_once()
rocket_chat.assert_called_once_with(**rocketchat_notifications)
rocket_chat().post_message.assert_called_once_with(channel, message)
@patch("eventkit_cloud.tasks.export_tasks.kill_task")
def test_cancel_task(self, mock_kill_task):
worker_name = "test_worker"
task_pid = 55
celery_uid = uuid.uuid4()
with patch("eventkit_cloud.jobs.signals.Group") as mock_group:
mock_group.objects.get.return_value = self.group
user = User.objects.create(username="test_user", password="test_password", email="test@email.com")
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, name="test_provider_task", provider=self.provider, status=TaskState.PENDING.value
)
export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task,
status=TaskState.PENDING.value,
name="test_task",
celery_uid=celery_uid,
pid=task_pid,
worker=worker_name,
)
self.assertEqual("Cancel Export Provider Task", cancel_export_provider_task.name)
cancel_export_provider_task.run(
data_provider_task_uid=export_provider_task.uid, canceling_username=user.username
)
mock_kill_task.apply_async.assert_called_once_with(
kwargs={"task_pid": task_pid, "celery_uid": celery_uid},
queue="{0}.priority".format(worker_name),
priority=TaskPriority.CANCEL.value,
routing_key="{0}.priority".format(worker_name),
)
export_task = ExportTaskRecord.objects.get(uid=export_task.uid)
export_provider_task = DataProviderTaskRecord.objects.get(uid=export_provider_task.uid)
self.assertEqual(export_task.status, TaskState.CANCELED.value)
self.assertEqual(export_provider_task.status, TaskState.CANCELED.value)
def test_parse_result(self):
result = parse_result(None, None)
self.assertIsNone(result)
task_result = [{"test": True}]
expected_result = True
returned_result = parse_result(task_result, "test")
self.assertEqual(expected_result, returned_result)
task_result = {"test": True}
expected_result = True
returned_result = parse_result(task_result, "test")
self.assertEqual(expected_result, returned_result)
def test_finalize_export_provider_task(self):
worker_name = "test_worker"
task_pid = 55
filename = "test.gpkg"
celery_uid = uuid.uuid4()
run_uid = self.run.uid
self.job.include_zipfile = True
self.job.save()
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, name="test_provider_task", status=TaskState.COMPLETED.value, provider=self.provider
)
result = FileProducingTaskResult.objects.create(filename=filename, size=10)
ExportTaskRecord.objects.create(
export_provider_task=export_provider_task,
status=TaskState.COMPLETED.value,
name="test_task",
celery_uid=celery_uid,
pid=task_pid,
worker=worker_name,
result=result,
)
        download_root = settings.EXPORT_DOWNLOAD_ROOT.rstrip("/\\")
run_dir = os.path.join(download_root, str(run_uid))
finalize_export_provider_task.run(
result={"status": TaskState.SUCCESS.value},
run_uid=self.run.uid,
data_provider_task_uid=export_provider_task.uid,
run_dir=run_dir,
status=TaskState.COMPLETED.value,
)
export_provider_task.refresh_from_db()
self.assertEqual(export_provider_task.status, TaskState.COMPLETED.value)
@patch("eventkit_cloud.tasks.export_tasks.progressive_kill")
@patch("eventkit_cloud.tasks.export_tasks.AsyncResult")
def test_kill_task(self, async_result, mock_progressive_kill):
# Ensure that kill isn't called with default.
task_pid = -1
celery_uid = uuid.uuid4()
self.assertEqual("Kill Task", kill_task.name)
kill_task.run(task_pid=task_pid, celery_uid=celery_uid)
mock_progressive_kill.assert_not_called()
# Ensure that kill is not called with an invalid state
task_pid = 55
async_result.return_value = Mock(state=celery.states.FAILURE)
self.assertEqual("Kill Task", kill_task.name)
kill_task.run(task_pid=task_pid, celery_uid=celery_uid)
mock_progressive_kill.assert_not_called()
# Ensure that kill is called with a valid pid
task_pid = 55
async_result.return_value = Mock(state=celery.states.STARTED)
self.assertEqual("Kill Task", kill_task.name)
kill_task.run(task_pid=task_pid, celery_uid=celery_uid)
mock_progressive_kill.assert_called_once_with(task_pid)
@patch("eventkit_cloud.tasks.export_tasks.ExportRun")
def test_wait_for_providers_task(self, mock_export_run):
mock_run_uid = str(uuid.uuid4())
mock_provider_task = Mock(status=TaskState.SUCCESS.value)
mock_export_run.objects.filter().first.return_value = Mock()
mock_export_run.objects.filter().first().data_provider_task_records.filter.return_value = [mock_provider_task]
callback_task = MagicMock()
apply_args = {"arg1": "example_value"}
wait_for_providers_task(run_uid=mock_run_uid, callback_task=callback_task, apply_args=apply_args)
callback_task.apply_async.assert_called_once_with(**apply_args)
callback_task.reset_mock()
mock_provider_task = Mock(status=TaskState.RUNNING.value)
mock_export_run.objects.filter().first.return_value = Mock()
mock_export_run.objects.filter().first().data_provider_task_records.filter.return_value = [mock_provider_task]
wait_for_providers_task(run_uid=mock_run_uid, callback_task=callback_task, apply_args=apply_args)
callback_task.apply_async.assert_not_called()
with self.assertRaises(Exception):
mock_export_run.reset_mock()
            mock_export_run.objects.filter().first().__bool__.return_value = False
wait_for_providers_task(run_uid=mock_run_uid, callback_task=callback_task, apply_args=apply_args)
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.get_arcgis_templates")
@patch("eventkit_cloud.tasks.export_tasks.get_metadata")
@patch("eventkit_cloud.tasks.export_tasks.zip_files")
@patch("eventkit_cloud.tasks.export_tasks.get_human_readable_metadata_document")
@patch("eventkit_cloud.tasks.export_tasks.get_style_files")
@patch("eventkit_cloud.tasks.export_tasks.json")
@patch("eventkit_cloud.tasks.export_tasks.generate_qgs_style")
@patch("os.path.join", side_effect=lambda *args: args[-1])
@patch("eventkit_cloud.tasks.export_tasks.get_export_task_record")
@patch("eventkit_cloud.tasks.export_tasks.DataProviderTaskRecord")
def test_create_zip_task(
self,
mock_DataProviderTaskRecord,
mock_get_export_task_record,
join,
mock_generate_qgs_style,
mock_json,
mock_get_style_files,
mock_get_human_readable_metadata_document,
mock_zip_files,
mock_get_metadata,
mock_get_arcgis_templates,
mock_get_export_filepath,
):
meta_files = {}
mock_get_style_files.return_value = style_files = {"/styles.png": "icons/styles.png"}
meta_files.update(style_files)
mock_get_arcgis_templates.return_value = arcgis_files = {"/arcgis/create_aprx.py": "arcgis/create_aprx.pyt"}
meta_files.update(arcgis_files)
mock_get_human_readable_metadata_document.return_value = human_metadata_doc = {
"/human_metadata.txt": "/human_metadata.txt"
}
meta_files.update(human_metadata_doc)
mock_generate_qgs_style.return_value = qgis_file = {"/style.qgs": "/style.qgs"}
meta_files.update(qgis_file)
include_files = {
"/var/lib/eventkit/exports_stage/7fadf34e-58f9-4bb8-ab57-adc1015c4269/osm/test.gpkg": "osm/test.gpkg",
"/var/lib/eventkit/exports_stage/7fadf34e-58f9-4bb8-ab57-adc1015c4269/osm/osm_selection.geojson": "osm/osm_selection.geojson", # NOQA
}
include_files.update(meta_files)
metadata = {
"aoi": "AOI",
"bbox": [-1, -1, 1, 1],
"data_sources": {
"osm": {
"copyright": None,
"description": "OpenStreetMap vector data provided in a custom thematic schema. \r\n\t\r\n\t"
"Data is grouped into separate tables (e.g. water, roads...).",
"file_path": "data/osm/test-osm-20181101.gpkg",
"file_type": ".gpkg",
"full_file_path": "/var/lib/eventkit/exports_stage/7fadf34e-58f9-4bb8-ab57-adc1015c4269/osm/"
"test.gpkg",
"last_update": "2018-10-29T04:35:02Z\n",
"metadata": "https://overpass-server.com/overpass/interpreter",
"name": "OpenStreetMap Data (Themes)",
"slug": "osm",
"type": "osm",
"uid": "0d08ddf6-35c1-464f-b271-75f6911c3f78",
}
},
"date": "20181101",
"description": "Test",
"has_elevation": False,
"has_raster": True,
"include_files": include_files,
"name": "test",
"project": "Test",
"run_uid": "7fadf34e-58f9-4bb8-ab57-adc1015c4269",
"url": "http://cloud.eventkit.test/status/2010025c-6d61-4a0b-8d5d-ff9c657259eb",
}
data_provider_task_record_uids = ["0d08ddf6-35c1-464f-b271-75f6911c3f78"]
mock_get_metadata.return_value = metadata
run_zip_file = RunZipFile.objects.create(run=self.run)
expected_zip = f"{metadata['name']}.zip"
mock_get_export_filepath.return_value = expected_zip
mock_zip_files.return_value = expected_zip
returned_zip = create_zip_task.run(
task_uid="UID",
data_provider_task_record_uids=data_provider_task_record_uids,
run_zip_file_uid=run_zip_file.uid,
)
mock_generate_qgs_style.assert_called_once_with(metadata)
mock_zip_files.assert_called_once_with(
files=metadata["include_files"],
run_zip_file_uid=run_zip_file.uid,
meta_files=meta_files,
file_path=expected_zip,
metadata=metadata,
)
mock_get_export_task_record.assert_called_once()
self.assertEqual(returned_zip, {"result": expected_zip})
def test_zip_file_task_invalid_params(self):
with self.assertRaises(Exception):
include_files = []
file_path = "/test/path.zip"
res = zip_files(include_files, file_path=file_path)
self.assertIsNone(res)
with self.assertRaises(Exception):
include_files = ["test1", "test2"]
file_path = ""
res = zip_files(include_files, file_path=file_path)
self.assertIsNone(res)
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.download_data")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_vector_file_export_task(self, mock_request, mock_convert, mock_download_data, mock_get_export_filepath):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
expected_provider_slug = "vector-file"
self.provider.export_provider_type = DataProviderType.objects.get(type_name="vector-file")
self.provider.slug = expected_provider_slug
self.provider.config = None
self.provider.save()
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
layer = "foo"
config = test_cert_info
service_url = "https://abc.gov/file.geojson"
mock_convert.return_value = expected_output_path
mock_download_data.return_value = service_url
previous_task_result = {"source": expected_output_path}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=vector_file_export_task.name
)
vector_file_export_task.update_task_state(
task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid)
)
result = vector_file_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=service_url,
layer=layer,
config=config,
)
mock_convert.assert_called_once_with(
driver="gpkg",
input_file=expected_output_path,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=projection,
boundary=None,
layer_name=expected_provider_slug,
is_raster=False,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(expected_output_path, result["source"])
self.assertEqual(expected_output_path, result["gpkg"])
mock_download_data.assert_called_once_with(
str(saved_export_task.uid),
service_url,
expected_output_path,
)
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.download_data")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_raster_file_export_task(self, mock_request, mock_convert, mock_download_data, mock_get_export_filepath):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
expected_provider_slug = "raster-file"
self.provider.export_provider_type = DataProviderType.objects.get(type_name="raster-file")
self.provider.slug = expected_provider_slug
self.provider.config = None
self.provider.save()
mock_get_export_filepath.return_value = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
layer = "foo"
config = test_cert_info
service_url = "https://abc.gov/file.geojson"
mock_convert.return_value = expected_output_path
mock_download_data.return_value = service_url
previous_task_result = {"source": expected_output_path}
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=raster_file_export_task.name
)
raster_file_export_task.update_task_state(
task_status=TaskState.RUNNING.value, task_uid=str(saved_export_task.uid)
)
result = raster_file_export_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=str(saved_export_task.uid),
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=service_url,
layer=layer,
config=config,
)
mock_convert.assert_called_once_with(
driver="gpkg",
input_file=expected_output_path,
output_file=expected_output_path,
task_uid=str(saved_export_task.uid),
projection=projection,
boundary=None,
is_raster=True,
)
self.assertEqual(expected_output_path, result["result"])
self.assertEqual(expected_output_path, result["source"])
self.assertEqual(expected_output_path, result["gpkg"])
mock_download_data.assert_called_once_with(
str(saved_export_task.uid),
service_url,
expected_output_path,
)
@patch("eventkit_cloud.tasks.export_tasks.parse_result")
@patch("eventkit_cloud.tasks.export_tasks.os")
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.get_metadata")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("eventkit_cloud.tasks.export_tasks.mapproxy.MapproxyGeopackage")
def test_reprojection_task(
self, mock_mapproxy, mock_gdal_convert, mock_get_metadata, mock_get_export_filepath, mock_os, mock_parse_result
):
job_name = self.job.name.lower()
in_projection = "4326"
out_projection = "3857"
expected_provider_slug = "some_provider"
self.provider.slug = expected_provider_slug
self.provider.config = None
self.provider.save()
date = default_format_time(timezone.now())
driver = "tif"
mock_get_export_filepath.return_value = expected_infile = expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
expected_input_path = os.path.join(self.stage_dir, expected_infile)
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task, status=TaskState.PENDING.value, name=reprojection_task.name
)
task_uid = str(saved_export_task.uid)
config = """
cert_info:
cert_path: '/path/to/cert'
cert_pass_var: 'fake_pass'
"""
selection = "selection.geojson"
metadata = {"data_sources": {expected_provider_slug: {"type": "something"}}}
mock_get_metadata.return_value = metadata
mock_gdal_convert.return_value = expected_output_path
mock_parse_result.side_effect = [driver, selection, None, expected_infile]
mock_get_export_filepath.return_value = expected_output_path
mock_os.path.splitext.return_value = ["path", driver]
previous_task_result = {"source": expected_output_path}
reprojection_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=task_uid,
stage_dir=self.stage_dir,
job_name=job_name,
projection=None,
config=None,
user_details=None,
)
# test reprojection is skipped
mock_os.rename.assert_called_once_with(expected_infile, expected_output_path)
mock_parse_result.side_effect = [driver, selection, None, expected_input_path]
reprojection_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=task_uid,
stage_dir=self.stage_dir,
job_name=job_name,
projection=out_projection,
config=config,
user_details=None,
)
# test reprojecting
mock_gdal_convert.assert_called_once_with(
driver=driver,
input_file=f"GTIFF_RAW:{expected_input_path}",
output_file=expected_output_path,
task_uid=task_uid,
projection=out_projection,
boundary=selection,
warp_params=ANY,
translate_params=ANY,
)
# test reprojecting raster geopackages
driver = "gpkg"
level_from = 0
level_to = 12
metadata = {
"data_sources": {expected_provider_slug: {"type": "raster", "level_from": level_from, "level_to": level_to}}
}
mock_get_metadata.return_value = metadata
expected_infile = f"{job_name}-{in_projection}-{expected_provider_slug}-{date}.{driver}"
expected_input_path = os.path.join(self.stage_dir, expected_infile)
mock_os.path.splitext.return_value = ["path", driver]
mock_parse_result.side_effect = [driver, selection, None, expected_input_path]
reprojection_task.run(
run_uid=self.run.uid,
result=previous_task_result,
task_uid=task_uid,
stage_dir=self.stage_dir,
job_name=job_name,
projection=out_projection,
config=config,
user_details=None,
)
mock_mapproxy.assert_called_once_with(
gpkgfile=expected_output_path,
service_url=expected_output_path,
name=job_name,
config=config,
bbox=ANY,
level_from=level_from,
level_to=level_to,
task_uid=task_uid,
selection=selection,
projection=out_projection,
input_gpkg=expected_input_path,
)
mock_mapproxy().convert.assert_called_once()
@patch("eventkit_cloud.tasks.export_tasks.get_export_filepath")
@patch("eventkit_cloud.tasks.export_tasks.find_in_zip")
@patch("eventkit_cloud.tasks.export_tasks.get_geometry")
@patch("eventkit_cloud.tasks.export_tasks.os.getenv")
@patch("eventkit_cloud.tasks.export_tasks.get_ogcapi_data")
@patch("eventkit_cloud.tasks.export_tasks.gdalutils.convert")
@patch("celery.app.task.Task.request")
def test_ogcapi_process_export_task(
self,
mock_request,
mock_convert,
mock_get_ogcapi_data,
mock_getenv,
mock_get_geometry,
mock_find_in_zip,
mock_get_export_filepath,
):
celery_uid = str(uuid.uuid4())
type(mock_request).id = PropertyMock(return_value=celery_uid)
job_name = self.job.name.lower()
projection = 4326
bbox = [1, 2, 3, 4]
example_geojson = "/path/to/geo.json"
example_result = {"selection": example_geojson}
expected_provider_slug = "ogc_api_proc"
example_format_slug = "fmt"
self.provider.export_provider_type = DataProviderType.objects.get(type_name="ogcapi-process")
self.provider.slug = expected_provider_slug
self.provider.config = None
self.provider.save()
expected_outfile = "/path/to/file.ext"
expected_output_path = os.path.join(self.stage_dir, expected_outfile)
expected_outzip = "/path/to/file.zip"
expected_outzip_path = os.path.join(self.stage_dir, expected_outzip)
source_file = "foo.gpkg"
export_provider_task = DataProviderTaskRecord.objects.create(
run=self.run, status=TaskState.PENDING.value, provider=self.provider
)
saved_export_task = ExportTaskRecord.objects.create(
export_provider_task=export_provider_task,
status=TaskState.PENDING.value,
name=ogcapi_process_export_task.name,
)
username = "user"
password = "password"
mock_getenv.return_value = f"{username}:{password}"
task_uid = str(saved_export_task.uid)
ogcapi_process_export_task.update_task_state(task_status=TaskState.RUNNING.value, task_uid=task_uid)
mock_geometry = Mock()
mock_get_geometry.return_value = mock_geometry
cred_var = "USER_PASS_ENV_VAR"
config = f"""
ogcapi_process:
id: 'eventkit'
inputs:
input:
value: 'random'
format:
value: 'gpkg'
outputs:
format:
mediaType: 'application/zip'
output_file_ext: '.gpkg'
download_credentials:
cred_var: '{cred_var}'
cred_var: '{cred_var}'
"""
service_url = "http://example.test/v1/"
session_token = "_some_token_"
mock_get_ogcapi_data.return_value = expected_outzip_path
mock_convert.return_value = expected_output_path
mock_find_in_zip.return_value = source_file
mock_get_export_filepath.side_effect = [expected_output_path, expected_outzip_path]
result = ogcapi_process_export_task.run(
result=example_result,
run_uid=self.run.uid,
task_uid=task_uid,
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=service_url,
layer=None,
config=config,
bbox=bbox,
session_token=session_token,
export_format_slug=example_format_slug,
)
mock_get_ogcapi_data.assert_called_with(
config=config,
task_uid=task_uid,
stage_dir=self.stage_dir,
bbox=bbox,
service_url=service_url,
session_token=session_token,
export_format_slug=example_format_slug,
selection=example_geojson,
download_path=expected_outzip_path,
)
mock_convert.assert_not_called()
expected_result = {"selection": example_geojson, "result": expected_outzip_path}
self.assertEqual(result, expected_result)
example_source_data = "source_path"
mock_find_in_zip.return_value = example_source_data
mock_convert.return_value = expected_output_path
mock_get_export_filepath.side_effect = [expected_output_path, expected_outzip_path]
result = ogcapi_process_export_task.run(
result=example_result,
run_uid=self.run.uid,
task_uid=task_uid,
stage_dir=self.stage_dir,
job_name=job_name,
projection=projection,
service_url=service_url,
layer=None,
config=config,
bbox=bbox,
session_token=session_token,
)
expected_result = {
"driver": "gpkg",
"file_extension": ".gpkg",
"ogcapi_process": expected_output_path,
"source": expected_output_path,
"gpkg": expected_output_path,
"selection": example_geojson,
"result": expected_outzip_path,
}
self.assertEqual(result, expected_result)
mock_convert.assert_called_once_with(
driver="gpkg",
input_file=example_source_data,
output_file=expected_output_path,
task_uid=task_uid,
projection=projection,
boundary=bbox,
)
@patch("eventkit_cloud.tasks.export_tasks.extract_metadata_files")
@patch("eventkit_cloud.tasks.export_tasks.update_progress")
@patch("eventkit_cloud.tasks.export_tasks.download_data")
@patch("eventkit_cloud.tasks.export_tasks.OgcApiProcess")
@patch("eventkit_cloud.tasks.export_tasks.get_geometry")
def test_get_ogcapi_data(
self,
mock_get_geometry,
mock_ogc_api_process,
mock_download_data,
mock_update_progress,
mock_extract_metadata_files,
):
bbox = [1, 2, 3, 4]
example_geojson = "/path/to/geo.json"
example_format_slug = "fmt"
task_uid = "1234"
mock_geometry = Mock()
mock_get_geometry.return_value = mock_geometry
config = """
ogcapi_process:
id: 'eventkit'
inputs:
input:
value: 'random'
format:
value: 'gpkg'
outputs:
format:
mediaType: 'application/zip'
output_file_ext: '.gpkg'
download_credentials:
cert_info:
cert_path: "something"
cert_pass: "something"
cert_info:
cert_path: "something"
cert_pass: "something"
"""
        configuration = yaml.safe_load(config)["ogcapi_process"]
service_url = "http://example.test/v1/"
session_token = "_some_token_"
example_download_url = "https://example.test/path.zip"
example_download_path = "/example/file.gpkg"
mock_ogc_api_process().get_job_results.return_value = example_download_url
mock_download_data.return_value = example_download_path
result = get_ogcapi_data(
config=config,
task_uid=task_uid,
stage_dir=self.stage_dir,
bbox=bbox,
service_url=service_url,
session_token=session_token,
export_format_slug=example_format_slug,
selection=example_geojson,
download_path=example_download_path,
)
self.assertEqual(result, example_download_path)
        mock_ogc_api_process.assert_called_with(
            url=service_url,
            config=config,
            session_token=session_token,
            task_id=task_uid,
            cred_var=configuration.get("cred_var"),
            cert_info=configuration.get("cert_info"),
        )
        mock_ogc_api_process().create_job.assert_called_once_with(mock_geometry, file_format=example_format_slug)
mock_download_data.assert_called_once_with(
task_uid, example_download_url, example_download_path, session=None, headers=None, token=None
)
mock_extract_metadata_files.assert_called_once_with(example_download_path, self.stage_dir)
class TestFormatTasks(ExportTaskBase):
def test_ensure_display(self):
self.assertTrue(FormatTask.display)
|
terranodo/eventkit-cloud
|
eventkit_cloud/tasks/tests/test_export_tasks.py
|
Python
|
bsd-3-clause
| 86,451
|
import polyadcirc.run_framework.domain as dom
import polyadcirc.pyGriddata.manufacture_gap as manu
grid_dir = '.'
domain = dom.domain(grid_dir)
domain.read_spatial_grid()
x_values = [n.x for n in domain.node.values()]
y_values = [n.y for n in domain.node.values()]
xr = max(x_values)
xl = min(x_values)
yu = max(y_values)
yl = min(y_values)
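# Bounding box of the spatial grid, taken from the node coordinates.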
x_points = (xl, 150, 750, xr)
p1 = [0, 0, 0, 1]
p2 = [0, 0, 0, 1]
p3 = [.2, .3, .4, .1]
rand_rect = manu.random_vertical(x_points, yl, yu, [1, 2, 3, 4], p_sections=[p1, p2, p3])
manu.write_gapfile(rand_rect, xl, yl, 'sections.asc')
|
UT-CHG/PolyADCIRC
|
examples/pyGriddata/manufactureGAP_vertical.py
|
Python
|
bsd-3-clause
| 610
|
import zlib
import struct
import time
def parse_encoding_header(header):
"""
    Break up the `HTTP_ACCEPT_ENCODING` header into a dict of the form
    {'encoding-name': qvalue}.
    """
    encodings = {'identity': 1.0}
    for encoding in header.split(","):
        if ";" in encoding:
            encoding, qvalue = encoding.split(";")
            qvalue = qvalue.split('=', 1)[1]
            if qvalue != "":
                encodings[encoding.strip()] = float(qvalue)
            else:
                encodings[encoding.strip()] = 1
        else:
            # Strip whitespace so "gzip, deflate" records 'deflate', not ' deflate'.
            encodings[encoding.strip()] = 1
    return encodings
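# Example (illustrative): parse_encoding_header("gzip;q=0.8, identity;q=0.5")
# returns {'identity': 0.5, 'gzip': 0.8}; a token without a qvalue defaults to 1.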
def gzip_requested(accept_encoding_header):
"""
Check to see if the client can accept gzipped output, and whether or
not it is even the preferred method. If `identity` is higher, then no
gzipping should occur.
"""
encodings = parse_encoding_header(accept_encoding_header)
# Do the actual comparisons
if('gzip' in encodings):
return encodings['gzip'] >= encodings['identity']
elif('*' in encodings):
return encodings['*'] >= encodings['identity']
else:
return False
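# Examples (illustrative):
#   gzip_requested("gzip;q=0.5, identity;q=1.0") -> False (identity preferred)
#   gzip_requested("gzip, deflate")              -> True
#   gzip_requested("identity")                   -> False (no gzip, no wildcard)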
# After much Googling and gnashing of teeth, this function stolen from
# cherrypy.lib.encoding seems to be the most straightforward way to do gzip
# encoding of a stream without loading the whole thing into memory at once.
def compress(chunks, compress_level, close=True):
"""
Compress 'chunks' at the given compress_level, where 'chunks' is an iterable
over chunks of bytes. If close=True, then look for .close() method on chunks
and call that when done iterating.
"""
try:
# See http://www.gzip.org/zlib/rfc-gzip.html
yield b'\x1f\x8b' # ID1 and ID2: gzip marker
yield b'\x08' # CM: compression method
yield b'\x00' # FLG: none set
# MTIME: 4 bytes
yield struct.pack("<L", int(time.time()) & int('FFFFFFFF', 16))
yield b'\x02' # XFL: max compression, slowest algo
yield b'\xff' # OS: unknown
crc = zlib.crc32(b"")
size = 0
zobj = zlib.compressobj(compress_level,
zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
for chunk in chunks:
size += len(chunk)
crc = zlib.crc32(chunk, crc)
yield zobj.compress(chunk)
yield zobj.flush()
# CRC32: 4 bytes
yield struct.pack("<L", crc & int('FFFFFFFF', 16))
# ISIZE: 4 bytes
yield struct.pack("<L", size & int('FFFFFFFF', 16))
finally:
if close and hasattr(chunks, 'close'):
chunks.close()
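# Minimal round-trip sketch (not part of the original module; assumes Python 3,
# where gzip.decompress is available): compress() emits a complete gzip member,
# so the stdlib decoder should accept its output unchanged.
if __name__ == '__main__':
    import gzip
    payload = [b"hello ", b"gzip ", b"stream"]
    blob = b"".join(compress(payload, compress_level=6))
    assert gzip.decompress(blob) == b"hello gzip stream"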
|
btubbs/spa
|
spa/gzip_util.py
|
Python
|
bsd-3-clause
| 2,754
|
from django.contrib import admin
from .models import Topping, Pizza
class ToppingInlineAdmin(admin.TabularInline):
model = Topping
extra = 1
class PizzaAdmin(admin.ModelAdmin):
fieldsets = (
('', {
'fields': ('description',),
}),
('Advanced', {
'classes': ('collapse',),
'fields': ('allergens',)
}),
)
inlines = [ToppingInlineAdmin]
admin.site.register(Pizza, PizzaAdmin)
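# Illustrative effect of the configuration above: the Pizza change form shows
# 'description' in an untitled section, hides 'allergens' in a collapsible
# "Advanced" panel, and edits Topping rows inline with one blank extra row.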
|
vxsx/djangocms-text-ckeditor
|
djangocms_text_ckeditor/test_app/admin.py
|
Python
|
bsd-3-clause
| 466
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""The :class:`FunctionalTester` object provides a higher-level interface to
working with a Trac environment to make test cases more succinct.
"""
import io
import re
from trac.tests.functional import internal_error
from trac.tests.functional.better_twill import tc, b
from trac.tests.contentgen import random_page, random_sentence, random_word, \
random_unique_camel
from trac.util.html import tag
from trac.util.text import to_utf8, unicode_quote
class FunctionalTester(object):
"""Provides a library of higher-level operations for interacting with a
test environment.
It makes assumptions such as knowing what ticket number is next, so
avoid doing things manually in a :class:`FunctionalTestCase` when you can.
"""
def __init__(self, url):
"""Create a :class:`FunctionalTester` for the given Trac URL and
Subversion URL"""
self.url = url
self.ticketcount = 0
# Connect, and login so we can run tests.
self.go_to_front()
self.login('admin')
def login(self, username):
"""Login as the given user"""
username = to_utf8(username)
tc.add_auth("", self.url, username, username)
self.go_to_front()
tc.find("Login")
tc.follow(r"\bLogin\b")
# We've provided authentication info earlier, so this should
# redirect back to the base url.
tc.find('logged in as[ \t\n]+<span class="trac-author-user">%s</span>'
% username)
tc.find("Logout")
tc.url(self.url)
tc.notfind(internal_error)
def logout(self):
"""Logout"""
tc.submit('logout', 'logout')
tc.notfind(internal_error)
tc.notfind('logged in as')
def create_ticket(self, summary=None, info=None):
"""Create a new (random) ticket in the test environment. Returns
the new ticket number.
:param summary:
may optionally be set to the desired summary
:param info:
may optionally be set to a dictionary of field value pairs for
populating the ticket. ``info['summary']`` overrides summary.
`summary` and `description` default to randomly-generated values.
"""
info = info or {}
self.go_to_front()
tc.follow(r"\bNew Ticket\b")
tc.notfind(internal_error)
if summary is None:
summary = random_sentence(5)
tc.formvalue('propertyform', 'field_summary', summary)
tc.formvalue('propertyform', 'field_description', random_page())
if 'owner' in info:
tc.formvalue('propertyform', 'action', 'assign')
tc.formvalue('propertyform',
'action_create_and_assign_reassign_owner',
info.pop('owner'))
for field, value in info.items():
tc.formvalue('propertyform', 'field_%s' % field, value)
tc.submit('submit')
tc.notfind(internal_error)
# we should be looking at the newly created ticket
tc.url(self.url + '/ticket/%s' % (self.ticketcount + 1))
# Increment self.ticketcount /after/ we've verified that the ticket
# was created so a failure does not trigger spurious later
# failures.
self.ticketcount += 1
return self.ticketcount
def quickjump(self, search):
"""Do a quick search to jump to a page."""
tc.formvalue('search', 'q', search)
tc.submit()
tc.notfind(internal_error)
def go_to_url(self, url):
tc.go(url)
tc.url(re.escape(url))
tc.notfind(internal_error)
def go_to_front(self):
"""Go to the Trac front page"""
self.go_to_url(self.url)
def go_to_ticket(self, ticketid=None):
"""Surf to the page for the given ticket ID, or to the NewTicket page
if `ticketid` is not specified or is `None`. If `ticketid` is
specified, it assumes the ticket exists."""
if ticketid is not None:
ticket_url = self.url + '/ticket/%s' % ticketid
else:
ticket_url = self.url + '/newticket'
self.go_to_url(ticket_url)
tc.url(ticket_url + '$')
def go_to_wiki(self, name, version=None):
"""Surf to the wiki page. By default this will be the latest version
of the page.
:param name: name of the wiki page.
:param version: version of the wiki page.
"""
# Used to go based on a quickjump, but if the wiki pagename isn't
# camel case, that won't work.
wiki_url = self.url + '/wiki/%s' % name
if version:
wiki_url += '?version=%s' % version
self.go_to_url(wiki_url)
def go_to_timeline(self):
"""Surf to the timeline page."""
self.go_to_front()
tc.follow(r"\bTimeline\b")
tc.url(self.url + '/timeline')
def go_to_view_tickets(self, href='report'):
"""Surf to the View Tickets page. By default this will be the Reports
page, but 'query' can be specified for the `href` argument to support
non-default configurations."""
self.go_to_front()
tc.follow(r"\bView Tickets\b")
tc.url(self.url + '/' + href.lstrip('/'))
def go_to_query(self):
"""Surf to the custom query page."""
self.go_to_front()
tc.follow(r"\bView Tickets\b")
tc.follow(r"\bNew Custom Query\b")
tc.url(self.url + '/query')
def go_to_admin(self, panel_label=None):
"""Surf to the webadmin page. Continue surfing to a specific
admin page if `panel_label` is specified."""
self.go_to_front()
tc.follow(r"\bAdmin\b")
tc.url(self.url + '/admin')
if panel_label is not None:
tc.follow(r"\b%s\b" % panel_label)
def go_to_roadmap(self):
"""Surf to the roadmap page."""
self.go_to_front()
tc.follow(r"\bRoadmap\b")
tc.url(self.url + '/roadmap')
def go_to_milestone(self, name):
"""Surf to the specified milestone page. Assumes milestone exists."""
self.go_to_roadmap()
tc.follow(r"\bMilestone: %s\b" % name)
tc.url(self.url + '/milestone/%s' % name)
def go_to_report(self, id, args=None):
"""Surf to the specified report.
Assumes the report exists. Report variables will be appended if
specified.
:param id: id of the report
:param args: may optionally specify a dictionary of arguments to
be encoded as a query string
"""
report_url = self.url + "/report/%s" % id
if args:
arglist = []
for param, value in args.items():
arglist.append('%s=%s' % (param.upper(), unicode_quote(value)))
report_url += '?' + '&'.join(arglist)
tc.go(report_url)
tc.url(report_url.encode('string-escape').replace('?', '\?'))
def go_to_preferences(self, panel_label=None):
"""Surf to the preferences page. Continue surfing to a specific
preferences panel if `panel_label` is specified."""
self.go_to_front()
tc.follow(r"\bPreferences\b")
tc.url(self.url + '/prefs')
if panel_label is not None:
tc.follow(r"\b%s\b" % panel_label)
def add_comment(self, ticketid, comment=None):
"""Adds a comment to the given ticket ID, assumes ticket exists."""
self.go_to_ticket(ticketid)
if comment is None:
comment = random_sentence()
tc.formvalue('propertyform', 'comment', comment)
tc.submit("submit")
# Verify we're where we're supposed to be.
# The fragment is stripped since Python 2.7.1, see:
# http://trac.edgewall.org/ticket/9990#comment:18
tc.url(self.url + '/ticket/%s(?:#comment:.*)?$' % ticketid)
return comment
def attach_file_to_ticket(self, ticketid, data=None, filename=None,
description=None, replace=False,
content_type=None):
"""Attaches a file to the given ticket id, with random data if none is
provided. Assumes the ticket exists.
"""
self.go_to_ticket(ticketid)
return self._attach_file_to_resource('ticket', ticketid, data,
filename, description,
replace, content_type)
def clone_ticket(self, ticketid):
"""Create a clone of the given ticket id using the clone button."""
ticket_url = self.url + '/ticket/%s' % ticketid
tc.go(ticket_url)
tc.url(ticket_url)
tc.formvalue('clone', 'clone', 'Clone')
tc.submit()
# we should be looking at the newly created ticket
self.ticketcount += 1
tc.url(self.url + "/ticket/%s" % self.ticketcount)
return self.ticketcount
def create_wiki_page(self, name=None, content=None, comment=None):
"""Creates a wiki page, with a random unique CamelCase name if none
is provided, random content if none is provided and a random comment
if none is provided. Returns the name of the wiki page.
"""
if name is None:
name = random_unique_camel()
if content is None:
content = random_page()
self.go_to_wiki(name)
tc.find("The page[ \n]+%s[ \n]+does not exist." % tag.strong(name))
self.edit_wiki_page(name, content, comment)
# verify the event shows up in the timeline
self.go_to_timeline()
tc.formvalue('prefs', 'wiki', True)
tc.submit()
tc.find(name + ".*created")
self.go_to_wiki(name)
return name
def edit_wiki_page(self, name, content=None, comment=None):
"""Edits a wiki page, with random content is none is provided.
and a random comment if none is provided. Returns the content.
"""
if content is None:
content = random_page()
if comment is None:
comment = random_sentence()
self.go_to_wiki(name)
tc.formvalue('modifypage', 'action', 'edit')
tc.submit()
tc.formvalue('edit', 'text', content)
tc.formvalue('edit', 'comment', comment)
tc.submit('save')
page_url = self.url + '/wiki/%s' % name
tc.url(page_url+'$')
return content
def attach_file_to_wiki(self, name, data=None, filename=None,
description=None, replace=False,
content_type=None):
"""Attaches a file to the given wiki page, with random content if none
is provided. Assumes the wiki page exists.
"""
self.go_to_wiki(name)
return self._attach_file_to_resource('wiki', name, data,
filename, description,
replace, content_type)
def create_milestone(self, name=None, due=None):
"""Creates the specified milestone, with a random name if none is
provided. Returns the name of the milestone.
"""
if name is None:
name = random_unique_camel()
milestone_url = self.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.formvalue('addmilestone', 'name', name)
if due:
# TODO: How should we deal with differences in date formats?
tc.formvalue('addmilestone', 'duedate', due)
tc.submit()
tc.notfind(internal_error)
tc.notfind('Milestone .* already exists')
tc.url(milestone_url)
tc.find(name)
# Make sure it's on the roadmap.
tc.follow(r"\bRoadmap\b")
tc.url(self.url + "/roadmap")
tc.find('Milestone:.*%s' % name)
tc.follow(r"\b%s\b" % name)
tc.url('%s/milestone/%s' % (self.url, unicode_quote(name)))
if not due:
tc.find('No date set')
return name
def attach_file_to_milestone(self, name, data=None, filename=None,
description=None, replace=False,
content_type=None):
"""Attaches a file to the given milestone, with random content if none
is provided. Assumes the milestone exists.
"""
self.go_to_milestone(name)
return self._attach_file_to_resource('milestone', name, data,
filename, description,
replace, content_type)
def create_component(self, name=None, owner=None, description=None):
"""Creates the specified component, with a random camel-cased name if
none is provided. Returns the name."""
if name is None:
name = random_unique_camel()
component_url = self.url + "/admin/ticket/components"
tc.go(component_url)
tc.url(component_url)
tc.formvalue('addcomponent', 'name', name)
if owner is not None:
tc.formvalue('addcomponent', 'owner', owner)
tc.submit()
# Verify the component appears in the component list
tc.url(component_url)
tc.find(name)
tc.notfind(internal_error)
if description is not None:
tc.follow(r"\b%s\b" % name)
tc.formvalue('edit', 'description', description)
tc.submit('save')
tc.url(component_url)
tc.find("Your changes have been saved.")
tc.notfind(internal_error)
# TODO: verify the component shows up in the newticket page
return name
def create_enum(self, kind, name=None):
"""Helper to create the specified enum (used for ``priority``,
``severity``, etc). If no name is given, a unique random word is used.
The name is returned.
"""
if name is None:
name = random_unique_camel()
priority_url = self.url + "/admin/ticket/" + kind
tc.go(priority_url)
tc.url(priority_url)
tc.formvalue('addenum', 'name', name)
tc.submit()
tc.url(priority_url)
tc.find(name)
tc.notfind(internal_error)
return name
def create_priority(self, name=None):
"""Create a new priority enum"""
return self.create_enum('priority', name)
def create_resolution(self, name=None):
"""Create a new resolution enum"""
return self.create_enum('resolution', name)
def create_severity(self, name=None):
"""Create a new severity enum"""
return self.create_enum('severity', name)
def create_type(self, name=None):
"""Create a new ticket type enum"""
return self.create_enum('type', name)
def create_version(self, name=None, releasetime=None):
"""Create a new version. The name defaults to a random camel-cased
word if not provided."""
version_admin = self.url + "/admin/ticket/versions"
if name is None:
name = random_unique_camel()
tc.go(version_admin)
tc.url(version_admin)
tc.formvalue('addversion', 'name', name)
if releasetime is not None:
tc.formvalue('addversion', 'time', releasetime)
tc.submit()
tc.url(version_admin)
tc.find(name)
tc.notfind(internal_error)
# TODO: verify releasetime
def create_report(self, title, query, description):
"""Create a new report with the given title, query, and description"""
self.go_to_front()
tc.follow(r"\bView Tickets\b")
tc.formvalue('create_report', 'action', 'new') # select the right form
tc.submit()
tc.find('New Report')
tc.notfind(internal_error)
tc.formvalue('edit_report', 'title', title)
tc.formvalue('edit_report', 'description', description)
tc.formvalue('edit_report', 'query', query)
tc.submit()
reportnum = b.get_url().split('/')[-1]
# TODO: verify the url is correct
# TODO: verify the report number is correct
# TODO: verify the report does not cause an internal error
# TODO: verify the title appears on the report list
return reportnum
def ticket_set_milestone(self, ticketid, milestone):
"""Set the milestone on a given ticket."""
self.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'milestone', milestone)
tc.submit('submit')
# TODO: verify the change occurred.
def _attach_file_to_resource(self, realm, name, data=None,
filename=None, description=None,
replace=False, content_type=None):
"""Attaches a file to a resource. Assumes the resource exists and
has already been navigated to."""
if data is None:
data = random_page()
if description is None:
description = random_sentence()
if filename is None:
filename = random_word()
tc.submit('attachfilebutton', 'attachfile')
tc.url(self.url + r'/attachment/%s/%s/\?action=new$' % (realm, name))
fp = io.BytesIO(data)
tc.formfile('attachment', 'attachment', filename,
content_type=content_type, fp=fp)
tc.formvalue('attachment', 'description', description)
if replace:
tc.formvalue('attachment', 'replace', True)
tc.submit()
tc.url(self.url + r'/attachment/%s/%s/$' % (realm, name))
return filename
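# Illustrative usage (assumes a running Trac instance at the given URL):
#   tester = FunctionalTester('http://127.0.0.1:8000/trac')
#   ticket_id = tester.create_ticket(summary='crash on startup')
#   tester.add_comment(ticket_id)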
|
rbaumg/trac
|
trac/tests/functional/tester.py
|
Python
|
bsd-3-clause
| 18,151
|
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.stats import rankdata, tiecorrect
class TestTieCorrect(object):
def test_empty(self):
"""An empty array requires no correction, should return 1.0."""
ranks = np.array([], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_one(self):
"""A single element requires no correction, should return 1.0."""
ranks = np.array([1.0], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_no_correction(self):
"""Arrays with no ties require no correction."""
ranks = np.arange(2.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
ranks = np.arange(3.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_basic(self):
"""Check a few basic examples of the tie correction factor."""
# One tie of two elements
ranks = np.array([1.0, 2.5, 2.5])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of two elements (same as above, but tie is not at the end)
ranks = np.array([1.5, 1.5, 3.0])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of three elements
ranks = np.array([1.0, 3.0, 3.0, 3.0])
c = tiecorrect(ranks)
T = 3.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# Two ties, lengths 2 and 3.
ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
c = tiecorrect(ranks)
T1 = 2.0
T2 = 3.0
N = ranks.size
expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
assert_equal(c, expected)
def test_overflow(self):
ntie, k = 2000, 5
a = np.repeat(np.arange(k), ntie)
n = a.size # ntie * k
out = tiecorrect(rankdata(a))
assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
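# Worked example (illustrative): for ranks [1.0, 2.5, 2.5] there is a single
# tie of T = 2 among N = 3 values, so the correction factor is
# 1 - (T**3 - T) / (N**3 - N) = 1 - 6/24 = 0.75.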
class TestRankData(object):
def test_empty(self):
"""stats.rankdata([]) should return an empty array."""
a = np.array([], dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([], dtype=np.float64))
r = rankdata([])
assert_array_equal(r, np.array([], dtype=np.float64))
def test_one(self):
"""Check stats.rankdata with an array of length 1."""
data = [100]
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
r = rankdata(data)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
def test_basic(self):
"""Basic tests of stats.rankdata."""
data = [100, 10, 50]
expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [40, 10, 30, 10, 50]
expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [20, 20, 20, 10, 10, 10]
expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
# The docstring states explicitly that the argument is flattened.
a2d = a.reshape(2, 3)
r = rankdata(a2d)
assert_array_equal(r, expected)
def test_rankdata_object_string(self):
min_rank = lambda a: [1 + sum(i < j for i in a) for j in a]
max_rank = lambda a: [sum(i <= j for i in a) for j in a]
ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)])
def average_rank(a):
return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
def dense_rank(a):
b = np.unique(a)
return [1 + sum(i < j for i in b) for j in a]
rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
average=average_rank, dense=dense_rank)
def check_ranks(a):
for method in 'min', 'max', 'dense', 'ordinal', 'average':
out = rankdata(a, method=method)
assert_array_equal(out, rankf[method](a))
val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
check_ranks(np.random.choice(val, 200))
check_ranks(np.random.choice(val, 200).astype('object'))
val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
check_ranks(np.random.choice(val, 200).astype('object'))
def test_large_int(self):
data = np.array([2**60, 2**60+1], dtype=np.uint64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, 2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, -2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [2.0, 1.0])
def test_big_tie(self):
for n in [10000, 100000, 1000000]:
data = np.ones(n, dtype=int)
r = rankdata(data)
expected_rank = 0.5 * (n + 1)
assert_array_equal(r, expected_rank * data,
"test failed with n=%d" % n)
_cases = (
# values, method, expected
([], 'average', []),
([], 'min', []),
([], 'max', []),
([], 'dense', []),
([], 'ordinal', []),
#
([100], 'average', [1.0]),
([100], 'min', [1.0]),
([100], 'max', [1.0]),
([100], 'dense', [1.0]),
([100], 'ordinal', [1.0]),
#
([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
#
([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
#
([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
#
([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
#
([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)
def test_cases():
for values, method, expected in _cases:
r = rankdata(values, method=method)
assert_array_equal(r, expected)
|
aeklant/scipy
|
scipy/stats/tests/test_rank.py
|
Python
|
bsd-3-clause
| 7,448
|
"""
COMMAND-LINE SPECIFIC STUFF
=============================================================================
"""
import markdown
import sys
import optparse
import logging
from logging import DEBUG, INFO, CRITICAL
logger = logging.getLogger('MARKDOWN')
def parse_options():
"""
Define and parse `optparse` options for command-line usage.
"""
usage = """%prog [options] [INPUTFILE]
(STDIN is assumed if no INPUTFILE is given)"""
desc = "A Python implementation of John Gruber's Markdown. " \
"http://www.freewisdom.org/projects/python-markdown/"
ver = "%%prog %s" % markdown.version
parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
parser.add_option("-f", "--file", dest="filename", default=sys.stdout,
help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding",
help="Encoding for input and output files.",)
parser.add_option("-q", "--quiet", default = CRITICAL,
action="store_const", const=CRITICAL+10, dest="verbose",
help="Suppress all warnings.")
parser.add_option("-v", "--verbose",
action="store_const", const=INFO, dest="verbose",
help="Print all warnings.")
parser.add_option("-s", "--safe", dest="safe", default=False,
metavar="SAFE_MODE",
help="'replace', 'remove' or 'escape' HTML tags in input")
parser.add_option("-o", "--output_format", dest="output_format",
default='xhtml1', metavar="OUTPUT_FORMAT",
help="'xhtml1' (default), 'html4' or 'html5'.")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="Print debug messages.")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help = "Load extension EXTENSION.", metavar="EXTENSION")
parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
action='store_false', default=True,
help="Observe number of first item of ordered lists.")
(options, args) = parser.parse_args()
if len(args) == 0:
input_file = sys.stdin
else:
input_file = args[0]
if not options.extensions:
options.extensions = []
return {'input': input_file,
'output': options.filename,
'safe_mode': options.safe,
'extensions': options.extensions,
'encoding': options.encoding,
'output_format': options.output_format,
'lazy_ol': options.lazy_ol}, options.verbose
def run():
"""Run Markdown from the command line."""
# Parse options and adjust logging level if necessary
options, logging_level = parse_options()
    if not options:
        sys.exit(2)
logger.setLevel(logging_level)
logger.addHandler(logging.StreamHandler())
# Run
markdown.markdownFromFile(**options)
if __name__ == '__main__':
# Support running module as a commandline command.
# Python 2.5 & 2.6 do: `python -m markdown.__main__ [options] [args]`.
# Python 2.7 & 3.x do: `python -m markdown [options] [args]`.
run()
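# Example invocations (illustrative; the extension name is assumed to be
# installed):
#     python -m markdown README.txt > README.html
#     python -m markdown -x tables -o html5 -f out.html input.txt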
|
leafnode/npp_markdown_script
|
lib/markdown/__main__.py
|
Python
|
bsd-3-clause
| 3,376
|
'''
Created on 2013-7-21
@author: hujin
'''
import sys
from PySide.QtGui import QApplication
from mdeditor.ui.window import MainWindow
class Application(QApplication):
def __init__(self):
'''
Constructor
'''
super(Application, self).__init__(sys.argv)
def run(self):
'''
Run the application.
'''
frame = MainWindow()
frame.show()
        sys.exit(self.exec_())
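# Assumed entry point (not shown in this module):
#     if __name__ == '__main__':
#         Application().run()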
|
bixuehujin/mdeditor
|
mdeditor/application.py
|
Python
|
bsd-3-clause
| 472
|
from rauth import OAuth1Service, OAuth2Service
from flask import current_app, url_for, request, redirect, session
import json
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('oauth_callback', provider=self.provider_name,
_external=True)
    @classmethod
    def get_provider(cls, provider_name):
        if cls.providers is None:
            cls.providers = {}
            for provider_class in cls.__subclasses__():
                provider = provider_class()
                cls.providers[provider.provider_name] = provider
        return cls.providers[provider_name]
def dump(obj):
for attr in dir(obj):
print "obj.%s = %s" % (attr, getattr(obj, attr))
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name='facebook',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url='https://graph.facebook.com/oauth/authorize',
access_token_url='https://graph.facebook.com/oauth/access_token',
base_url='https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri=self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
            return None, None, None, None, None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me').json()
picture = oauth_session.get('me/picture')
dump(me.get('bio'))
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email'),
me.get('gender'),
me.get('timezone'),
picture.url,
me.get('locale')
)
class GoogleSignIn(OAuthSignIn):
def __init__(self):
super(GoogleSignIn, self).__init__('google')
self.service = OAuth2Service(
name='google',
base_url='https://www.googleapis.com/plus/v1/people/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
access_token_url='https://accounts.google.com/o/oauth2/token',
client_id=self.consumer_id,
client_secret=self.consumer_secret
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri=self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
            return None, None, None, None, None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()},
decoder=json.loads
)
        dump(oauth_session.get('me'))
me = oauth_session.get('me').json()
#picture = oauth_session.get('me/picture')
dump(me)
return (
'google$' + me['id'],
            me['emails'][0]['value'].split('@')[0], # Google does not provide
                                                    # a plain username, so the
                                                    # email's user part is used
me['emails'][0]['value'],
me['gender'],
None,
me['image']['url'],
None
)
class TwitterSignIn(OAuthSignIn):
def __init__(self):
super(TwitterSignIn, self).__init__('twitter')
self.service = OAuth1Service(
name='twitter',
consumer_key=self.consumer_id,
consumer_secret=self.consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
authorize_url='https://api.twitter.com/oauth/authorize',
access_token_url='https://api.twitter.com/oauth/access_token',
base_url='https://api.twitter.com/1.1/'
)
def authorize(self):
request_token = self.service.get_request_token(
params={'oauth_callback': self.get_callback_url()}
)
session['request_token'] = request_token
return redirect(self.service.get_authorize_url(request_token[0]))
def callback(self):
request_token = session.pop('request_token')
if 'oauth_verifier' not in request.args:
            return None, None, None, None, None, None, None
oauth_session = self.service.get_auth_session(
request_token[0],
request_token[1],
data={'oauth_verifier': request.args['oauth_verifier']}
)
me = oauth_session.get('account/verify_credentials.json').json()
social_id = 'twitter$' + str(me.get('id'))
username = me.get('screen_name')
        return social_id, username, None, None, None, None, None  # Twitter does not provide email
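# Hedged wiring sketch (hypothetical Flask app and authorize route; the
# 'oauth_callback' endpoint name is the one assumed by get_callback_url()):
#
#     @app.route('/authorize/<provider>')
#     def oauth_authorize(provider):
#         return OAuthSignIn.get_provider(provider).authorize()
#
#     @app.route('/callback/<provider>')
#     def oauth_callback(provider):
#         social_id, username, email, gender, tz, picture, locale = \
#             OAuthSignIn.get_provider(provider).callback()
#         # ... look up or create the user here ...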
|
sandeep6189/Pmp-Webapp
|
oauth.py
|
Python
|
bsd-3-clause
| 5,795
|
#!/usr/bin/env python3
# Copyright (c) 2014-present, The osquery authors
#
# This source code is licensed as defined by the LICENSE file found in the
# root directory of this source tree.
#
# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only)
import glob
import os
import signal
import shutil
import time
import unittest
# osquery-specific testing utils
import test_base
class DaemonTests(test_base.ProcessGenerator, unittest.TestCase):
@test_base.flaky
def test_1_daemon_without_watchdog(self):
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
@test_base.flaky
def test_2_daemon_with_option(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
daemon = self._run_daemon(
{
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
},
options_only={
"logger_path": logger_path,
"verbose": True,
})
self.assertTrue(daemon.isAlive())
info_path = os.path.join(logger_path, "osqueryd.INFO*")
def info_exists():
return len(glob.glob(info_path)) > 0
# Wait for the daemon to flush to GLOG.
test_base.expectTrue(info_exists)
# Assign the variable after we have assurances it exists
self.assertTrue(info_exists())
# Lastly, verify that we have permission to read the file
data = ''
with open(glob.glob(info_path)[0], 'r') as fh:
try:
data = fh.read()
except:
pass
self.assertTrue(len(data) > 0)
daemon.kill()
@test_base.flaky
def test_3_daemon_with_watchdog(self):
# This test does not join the service threads properly (waits for int).
if os.environ.get('SANITIZE') is not None:
return
daemon = self._run_daemon({
"allow_unsafe": True,
"disable_watchdog": False,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Check that the daemon spawned a child process
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
daemon.kill()
# This will take a few moments to make sure the client process
# dies when the watcher goes away
self.assertTrue(daemon.isDead(children[0]))
@test_base.flaky
def test_3_daemon_lost_worker(self):
# Test that killed workers are respawned by the watcher
if os.environ.get('SANITIZE') is not None:
return
daemon = self._run_daemon({
"allow_unsafe": True,
"disable_watchdog": False,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Check that the daemon spawned a child process
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
# Kill only the child worker
os.kill(children[0], signal.SIGINT)
self.assertTrue(daemon.isDead(children[0]))
self.assertTrue(daemon.isAlive())
# Expect the children of the daemon to be respawned
def waitDaemonChildren():
children = daemon.getChildren()
return len(children) > 0
test_base.expectTrue(waitDaemonChildren)
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
@test_base.flaky
def test_4_daemon_sighup(self):
# A hangup signal should not do anything to the daemon.
daemon = self._run_daemon({
"disable_watchdog": True,
})
self.assertTrue(daemon.isAlive())
# Send SIGHUP on posix. Windows does not have SIGHUP so we use SIGTERM
sig = signal.SIGHUP if os.name != "nt" else signal.SIGTERM
os.kill(daemon.proc.pid, sig)
self.assertTrue(daemon.isAlive())
@test_base.flaky
def test_5_daemon_sigint(self):
# An interrupt signal will cause the daemon to stop.
daemon = self._run_daemon({
"disable_watchdog": True,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Send a SIGINT
os.kill(daemon.pid, signal.SIGINT)
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
@test_base.flaky
def test_6_logger_mode(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
test_mode = 0o754 # Strange mode that should never exist
daemon = self._run_daemon(
{
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
},
options_only={
"logger_path": logger_path,
"logger_mode": test_mode,
"verbose": True,
})
self.assertTrue(daemon.isAlive())
# Wait for the daemon to write the info log to disk before continuing
info_path = os.path.join(logger_path, "osqueryd.INFO*")
def info_exists():
return len(glob.glob(info_path)) > 0
results_path = os.path.join(logger_path, "osqueryd.results.log")
def results_exists():
return os.path.exists(results_path)
# Wait for the daemon to flush to GLOG.
test_base.expectTrue(info_exists)
test_base.expectTrue(results_exists)
info_path = glob.glob(info_path)[0]
# Both log files should exist, the results should have the given mode.
for pth in [info_path, results_path]:
self.assertTrue(os.path.exists(pth))
# Only apply the mode checks to .log files.
# TODO: Add ACL checks for Windows logs
if pth.find('.log') > 0 and os.name != "nt":
rpath = os.path.realpath(pth)
mode = os.stat(rpath).st_mode & 0o777
self.assertEqual(mode, test_mode)
daemon.kill()
def test_7_logger_stdout(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"logger_path": logger_path,
"verbose": True,
})
info_path = os.path.join(logger_path, "osqueryd.INFO")
        def pathDoesntExist():
            return not os.path.exists(info_path)
self.assertTrue(daemon.isAlive())
self.assertTrue(pathDoesntExist())
daemon.kill()
def test_8_hostid_uuid(self):
# Test added to test using UUID as hostname ident for issue #3195
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"host_identifier": "uuid",
"verbose": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
def test_9_hostid_instance(self):
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"host_identifier": "instance",
"verbose": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
def test_config_check_exits(self):
daemon = self._run_daemon({
"config_check": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose": True,
})
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
def test_config_dump_exits(self):
daemon = self._run_daemon({
"config_dump": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose": True,
})
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
def test_database_dump_exits(self):
daemon = self._run_daemon({
"database_dump": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose": True,
})
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
if __name__ == '__main__':
with test_base.CleanChildProcesses():
test_base.Tester().run()
|
hackgnar/osquery
|
tools/tests/test_osqueryd.py
|
Python
|
bsd-3-clause
| 9,307
|
import os
import sys
import time
import itertools
def get_sleeper():
if os.isatty(sys.stdout.fileno()):
return PrettySleeper()
return Sleeper()
_PROGRESS_WIDTH = 50
_WATCHING_MESSAGE = "Watching for changes... "
_PROGRESS_BAR = [
_WATCHING_MESSAGE + "".join('>' if offset == position else ' ' for offset in range(_PROGRESS_WIDTH - len(_WATCHING_MESSAGE)))
for position in range(_PROGRESS_WIDTH - len(_WATCHING_MESSAGE))
]
_WIPE_STRING = "\b" * _PROGRESS_WIDTH
class PrettySleeper(object):
def __init__(self):
super(PrettySleeper, self).__init__()
self._bar_iterator = itertools.cycle(_PROGRESS_BAR)
def sleep(self, seconds):
sys.stdout.write(next(self._bar_iterator))
sys.stdout.flush()
sys.stdout.write(_WIPE_STRING)
time.sleep(seconds)
def wake(self):
sys.stdout.write(" " * _PROGRESS_WIDTH)
sys.stdout.write(_WIPE_STRING)
sys.stdout.flush()
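class Sleeper(object):
    """Plain sleeper used when stdout is not a TTY.
    Minimal fallback sketch (assumed; the original class body is not shown
    in this excerpt): same interface as PrettySleeper, but no progress bar.
    """
    def sleep(self, seconds):
        time.sleep(seconds)
    def wake(self):
        pass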
|
vmalloc/redgreen
|
redgreen/sleeper.py
|
Python
|
bsd-3-clause
| 960
|
"""Code for performing requests"""
import json
import logging
import urllib.request
import zlib
from urllib.error import HTTPError
import requests
from defusedxml import ElementTree
from scout.constants import CHROMOSOMES, HPO_URL, HPOTERMS_URL
from scout.utils.ensembl_rest_clients import EnsemblBiomartClient
LOG = logging.getLogger(__name__)
TIMEOUT = 20
def post_request_json(url, data, headers=None):
"""Send json data via POST request and return response
Args:
url(str): url to send request to
data(dict): data to be sent
headers(dict): request headers
Returns:
json_response(dict)
"""
resp = None
json_response = {}
try:
LOG.debug(f"Sending POST request with json data to {url}")
if headers:
resp = requests.post(url, headers=headers, json=data)
else:
resp = requests.post(url, json=data)
json_response["content"] = resp.json()
except Exception as ex:
return {"message": f"An error occurred while sending a POST request to url {url} -> {ex}"}
json_response["status_code"] = resp.status_code
return json_response
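# Illustrative call (hypothetical URL and payload):
#     resp = post_request_json("https://example.com/api", {"term": "HP:0000002"})
#     if "message" in resp:
#         ...  # the request failed before a response arrived; no status_code key
#     else:
#         resp["status_code"], resp["content"]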
def get_request_json(url, headers=None):
"""Send GET request and return response's json data
Args:
url(str): url to send request to
        headers(dict): optional request headers to use in the request
Returns:
json_response(dict), example {"status_code":200, "content":{original json content}}
"""
resp = None
json_response = {}
try:
LOG.debug(f"Sending GET request to {url}")
if headers:
resp = requests.get(url, timeout=TIMEOUT, headers=headers)
else:
resp = requests.get(url, timeout=TIMEOUT)
json_response["content"] = resp.json()
except Exception as ex:
return {"message": f"An error occurred while sending a GET request to url {url} -> {ex}"}
json_response["status_code"] = resp.status_code
return json_response
def delete_request_json(url, headers=None, data=None):
"""Send a DELETE request to a remote API and return its response
Args:
url(str): url to send request to
        headers(dict): optional request headers to use in the request
        data(dict): optional request data to be passed as a JSON object
Returns:
json_response(dict)
"""
resp = None
json_response = {}
try:
LOG.debug(f"Sending DELETE request to {url}")
if headers and data:
resp = requests.delete(url, headers=headers, json=data)
elif headers:
resp = requests.delete(url, headers=headers)
else:
resp = requests.delete(url)
json_response["content"] = resp.json()
except Exception as ex:
return {"message": f"An error occurred while sending a DELETE request to url {url} -> {ex}"}
json_response["status_code"] = resp.status_code
return json_response
def get_request(url):
"""Return a requests response from url
Args:
url(str)
Returns:
decoded_data(str): Decoded response
"""
try:
LOG.info("Requesting %s", url)
response = requests.get(url, timeout=TIMEOUT)
if response.status_code != 200:
response.raise_for_status()
LOG.info("Encoded to %s", response.encoding)
except requests.exceptions.HTTPError as err:
LOG.warning("Something went wrong, perhaps the api key is not valid?")
raise err
except requests.exceptions.MissingSchema as err:
LOG.warning("Something went wrong, perhaps url is invalid?")
raise err
except requests.exceptions.Timeout as err:
LOG.error("socket timed out - URL %s", url)
raise err
return response
def fetch_resource(url, json=False):
"""Fetch a resource and return the resulting lines in a list or a json object
Send file_name to get more clean log messages
Args:
url(str)
json(bool): if result should be in json
Returns:
data
"""
data = None
if url.startswith("ftp"):
# requests do not handle ftp
response = urllib.request.urlopen(url, timeout=TIMEOUT)
if isinstance(response, Exception):
raise response
data = response.read().decode("utf-8")
return data.split("\n")
response = get_request(url)
if json:
LOG.info("Return in json")
data = response.json()
else:
content = response.text
if response.url.endswith(".gz"):
LOG.info("gzipped!")
encoded_content = b"".join(chunk for chunk in response.iter_content(chunk_size=128))
content = zlib.decompress(encoded_content, 16 + zlib.MAX_WBITS).decode("utf-8")
data = content.split("\n")
return data
def fetch_hpo_terms():
"""Fetch the latest version of the hpo terms in .obo format
Returns:
res(list(str)): A list with the lines
"""
url = HPOTERMS_URL
return fetch_resource(url)
def fetch_genes_to_hpo_to_disease():
"""Fetch the latest version of the map from genes to phenotypes
Returns:
res(list(str)): A list with the lines formatted this way:
#Format: entrez-gene-id<tab>entrez-gene-symbol<tab>HPO-Term-Name<tab>\
HPO-Term-ID<tab>Frequency-Raw<tab>Frequency-HPO<tab>
Additional Info from G-D source<tab>G-D source<tab>disease-ID for link
72 ACTG2 HP:0002027 Abdominal pain - mim2gene OMIM:155310
72 ACTG2 HP:0000368 Low-set, posteriorly rotated ears HP:0040283 orphadata
ORPHA:2604
"""
url = HPO_URL.format("genes_to_phenotype.txt")
return fetch_resource(url)
def fetch_hpo_to_genes_to_disease():
"""Fetch the latest version of the map from phenotypes to genes
Returns:
res(list(str)): A list with the lines formatted this way:
#Format: HPO-id<tab>HPO label<tab>entrez-gene-id<tab>entrez-gene-symbol\
<tab>Additional Info from G-D source<tab>G-D source
<tab>disease-ID for link
HP:0000002 Abnormality of body height 3954 LETM1 - mim2gene OMIM:194190
HP:0000002 Abnormality of body height 197131 UBR1 - mim2gene OMIM:243800
HP:0000002 Abnormality of body height 79633 FAT4 orphadata ORPHA:314679
"""
url = HPO_URL.format("phenotype_to_genes.txt")
return fetch_resource(url)
def fetch_hpo_files(genes_to_phenotype=False, phenotype_to_genes=False, hpo_terms=False):
"""
Fetch the necessary HPO files from http://compbio.charite.de
Args:
genes_to_phenotype(bool): if file genes_to_phenotype.txt is required
phenotype_to_genes(bool): if file phenotype_to_genes.txt is required
        hpo_terms(bool): if file hp.obo is required
Returns:
hpo_files(dict): A dictionary with the necessary files
"""
LOG.info("Fetching HPO information from http://compbio.charite.de")
hpo_files = {}
if genes_to_phenotype is True:
hpo_files["genes_to_phenotype"] = fetch_genes_to_hpo_to_disease()
if phenotype_to_genes is True:
hpo_files["phenotype_to_genes"] = fetch_hpo_to_genes_to_disease()
if hpo_terms is True:
hpo_files["hpo_terms"] = fetch_hpo_terms()
return hpo_files
def fetch_mim_files(api_key, mim2genes=False, mimtitles=False, morbidmap=False, genemap2=False):
"""Fetch the necessary mim files using a api key
Args:
api_key(str): A api key necessary to fetch mim data
Returns:
mim_files(dict): A dictionary with the neccesary files
"""
LOG.info("Fetching OMIM files from https://omim.org/")
mim2genes_url = "https://omim.org/static/omim/data/mim2gene.txt"
mimtitles_url = "https://data.omim.org/downloads/{0}/mimTitles.txt".format(api_key)
morbidmap_url = "https://data.omim.org/downloads/{0}/morbidmap.txt".format(api_key)
genemap2_url = "https://data.omim.org/downloads/{0}/genemap2.txt".format(api_key)
mim_files = {}
mim_urls = {}
if mim2genes is True:
mim_urls["mim2genes"] = mim2genes_url
if mimtitles is True:
mim_urls["mimtitles"] = mimtitles_url
if morbidmap is True:
mim_urls["morbidmap"] = morbidmap_url
if genemap2 is True:
mim_urls["genemap2"] = genemap2_url
for file_name in mim_urls:
url = mim_urls[file_name]
mim_files[file_name] = fetch_resource(url)
return mim_files
def fetch_ensembl_biomart(attributes, filters, build=None):
"""Fetch data from ensembl biomart
Args:
attributes(list): List of selected attributes
filters(dict): Select what filters to use
build(str): '37' or '38'
Returns:
client(EnsemblBiomartClient)
"""
build = build or "37"
client = EnsemblBiomartClient(build=build, filters=filters, attributes=attributes)
LOG.info("Selecting attributes: %s", ", ".join(attributes))
LOG.info("Use filter: %s", filters)
return client
def fetch_ensembl_genes(build=None, chromosomes=None):
"""Fetch the ensembl genes
Args:
build(str): ['37', '38']
chromosomes(iterable(str))
Returns:
        result(iterable): Ensembl formatted gene lines
"""
chromosomes = chromosomes or CHROMOSOMES
LOG.info("Fetching ensembl genes")
attributes = [
"chromosome_name",
"start_position",
"end_position",
"ensembl_gene_id",
"hgnc_symbol",
"hgnc_id",
]
filters = {"chromosome_name": chromosomes}
return fetch_ensembl_biomart(attributes, filters, build)
def fetch_ensembl_transcripts(build=None, chromosomes=None):
"""Fetch the ensembl genes
Args:
build(str): ['37', '38']
chromosomes(iterable(str))
Returns:
        result(iterable): Ensembl formatted transcript lines
"""
chromosomes = chromosomes or CHROMOSOMES
LOG.info("Fetching ensembl transcripts")
attributes = [
"chromosome_name",
"ensembl_gene_id",
"ensembl_transcript_id",
"transcript_start",
"transcript_end",
"refseq_mrna",
"refseq_mrna_predicted",
"refseq_ncrna",
]
filters = {"chromosome_name": chromosomes}
return fetch_ensembl_biomart(attributes, filters, build)
def fetch_ensembl_exons(build=None, chromosomes=None):
"""Fetch the ensembl genes
Args:
build(str): ['37', '38']
chromosomes(iterable(str))
"""
chromosomes = chromosomes or CHROMOSOMES
LOG.info("Fetching ensembl exons")
attributes = [
"chromosome_name",
"ensembl_gene_id",
"ensembl_transcript_id",
"ensembl_exon_id",
"exon_chrom_start",
"exon_chrom_end",
"5_utr_start",
"5_utr_end",
"3_utr_start",
"3_utr_end",
"strand",
"rank",
]
filters = {"chromosome_name": chromosomes}
return fetch_ensembl_biomart(attributes, filters, build)
def fetch_hgnc():
"""Fetch the hgnc genes file from
ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt
Returns:
hgnc_gene_lines(list(str))
"""
file_name = "hgnc_complete_set.txt"
url = "ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/{0}".format(file_name)
LOG.info("Fetching HGNC genes from %s", url)
hgnc_lines = fetch_resource(url)
return hgnc_lines
def fetch_exac_constraint():
"""Fetch the file with exac constraint scores
Returns:
exac_lines(iterable(str))
"""
file_name = "fordist_cleaned_exac_r03_march16_z_pli_rec_null_data.txt"
url = (
"ftp://ftp.broadinstitute.org/pub/ExAC_release/release0.3/functional_gene_constraint" "/{0}"
).format(file_name)
exac_lines = None
LOG.info("Fetching ExAC genes")
try:
exac_lines = fetch_resource(url)
except HTTPError:
LOG.info("Failed to fetch exac constraint scores file from ftp server")
LOG.info("Try to fetch from google bucket...")
url = (
"https://storage.googleapis.com/gnomad-public/legacy/exacv1_downloads/release0.3.1"
"/manuscript_data/forweb_cleaned_exac_r03_march16_z_data_pLI.txt.gz"
)
if not exac_lines:
exac_lines = fetch_resource(url)
return exac_lines
def fetch_refseq_version(refseq_acc):
"""Fetch refseq version from entrez and return refseq version
Args:
refseq_acc(str) example: NM_020533
Returns
version(str) example: NM_020533.3 or NM_020533 if no version associated is found
"""
version = refseq_acc
base_url = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=nuccore&"
"term={}&idtype=acc"
)
try:
resp = get_request(base_url.format(refseq_acc))
tree = ElementTree.fromstring(resp.content)
version = tree.find("IdList").find("Id").text or version
except (
requests.exceptions.HTTPError,
requests.exceptions.MissingSchema,
AttributeError,
):
LOG.warning("refseq accession not found")
return version
|
Clinical-Genomics/scout
|
scout/utils/scout_requests.py
|
Python
|
bsd-3-clause
| 13,130
|
emails = sorted(set([line.strip() for line in open("email_domains.txt")]))
for email in emails:
print("'{email}',".format(email=email))
|
aaronbassett/DisposableEmailChecker
|
build_list.py
|
Python
|
bsd-3-clause
| 141
|
#!/usr/bin/python
# Copyright (c) Arni Mar Jonsson.
# See LICENSE for details.
import rocksdb, pointless, random, string, itertools, collections
from twisted.internet import reactor, defer, threads
def compare(a, b):
return cmp(a, b)
c = 'bytewise'
c = ('rocksdb.BytewiseComparator', compare)
kw = {
'create_if_missing': True,
'error_if_exists': False,
'paranoid_checks': False,
'block_cache_size': 8 * (2 << 20),
'write_buffer_size': 2 * (2 << 20),
'block_size': 4096,
'max_open_files': 1000,
'block_restart_interval': 16,
'comparator': c
}
def random_value(n):
return bytearray(''.join(random.choice(string.ascii_letters) for i in xrange(n)))
def generate_data():
random.seed(0)
k_ = []
v_ = []
for i in xrange(1000000):
k = random_value(8)
v = random_value(8)
k_.append(k)
v_.append(v)
pointless.serialize([k_, v_], 'data.map')
@defer.inlineCallbacks
def insert_alot(db, kv, ops, stop):
while not stop[0]:
k = random.choice(kv[0])
v = random.choice(kv[1])
yield threads.deferToThread(db.Put, k, v)
ops['n_insert'] += 1
if ops['n_insert'] > 0 and ops['n_insert'] % 1000 == 0:
print 'INFO: n_insert: %iK' % (ops['n_insert'] // 1000,)
@defer.inlineCallbacks
def scan_alot(db, kv, ops, stop):
n_scans = 0
while not stop[0]:
k_a = random.choice(kv[0])
k_b = random.choice(kv[0])
        k_a, k_b = min(k_a, k_b), max(k_a, k_b)
i = db.RangeIter(k_a, k_b)
n_max = random.randint(100, 10000)
for c in itertools.count():
try:
next = yield threads.deferToThread(i.next)
except StopIteration:
break
if c > n_max:
break
ops['n_scans'] += 1
if ops['n_scans'] > 0 and ops['n_scans'] % 1000 == 0:
print 'INFO: n_scans: %iK' % (ops['n_scans'] // 1000,)
def main():
#generate_data()
reactor.suggestThreadPoolSize(20)
kv = pointless.Pointless('/home/arni/py-rocksdb/data.map', allow_print = False).GetRoot()
db = rocksdb.RocksDB('./db', **kw)
stop = [False]
def do_stop():
stop[0] = True
reactor.stop()
ops = collections.defaultdict(int)
for i in xrange(10):
reactor.callWhenRunning(insert_alot, db, kv, ops, stop)
reactor.callWhenRunning(scan_alot, db, kv, ops, stop)
reactor.callLater(10000.0, do_stop)
reactor.run()
if __name__ == '__main__':
main()
|
botify-labs/py-rocksdb
|
test/try.py
|
Python
|
bsd-3-clause
| 2,259
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2014, Kersten Doering <kersten.doering@gmail.com>, Bjoern Gruening <bjoern.gruening@gmail.com>
"""
#Kersten Doering 16.06.2014
#check http://xapian.org/docs/queryparser.html for syntax and functions
import xappy
searchConn = xappy.SearchConnection("xapian/xapian2015")
searchConn.reopen()
#########################
querystring1 = "scaffolds"
querystring2 = "finger"
#in the following example, the two query terms are not allowed to have more than 4 other words between them
#"title" and "text" are searched with Xapian
#"pancreatic cancer" is split into two terms and connected with the other query using "NEAR"
terms = querystring1.split(' ')
querystring1 = " NEAR/3 ".join(terms)#not more than 2 words are allowed to be between "pancreatic" and "cancer"
#NEAR searches without considering the word order, while in case of ADJ the word order is fixed
title = querystring1 + " NEAR/5 " + querystring2 # adjusting the limit of words between the terms changes the results
#same query can be done for the field "text" which is the PubMed abstract and both query fields can be connected with logical OR - look at search_title_or_text.py or search_not_title_or_text.py
#notice that this becomes a phrase search now for the single terms
title_q = searchConn.query_field('title', title)
print "search query: ", title_q
#save all machting documents in "results" (starting with rank 0 - check help documentation of function "search")
results = searchConn.search(title_q, 0, searchConn.get_doccount())
print "number of matches: ", results.matches_estimated
### debug: ###
#print "Rank\tPubMed-ID\tTitle (query term highlighted)"
#for index,result in enumerate(results):
# if "<b>" in results.get_hit(index).highlight('title')[0]:
# print index, "\t", result.id, "\t", results.get_hit(index).highlight('title')[0]
# else:
# print result.id, "does not contain a highlighted term"
## if index > 5:
## break
#HTML output:
#open HTML file
outfile = open("Xapian_query_results_NEAR.html","w")
#document header
start_string = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head>
<meta http-equiv="content-type" content="text/html; charset=windows-1252">
<title>Xapian_query_results_NEAR</title>
</head>
<body>
<table border="1" width="100%">
<tbody><tr>
<th>Rank</th>
<th>PubMed-ID</th>
<th>Title (query term highlighted)</th>
</tr>
"""
#string for finishing HTML document
end_string = """
</tbody></table>
</body></html>
"""
#write header
outfile.write(start_string)
print "### save results in Xapian_query_results_NEAR.html ###"
#write the first 1000 PubMed-IDs and titles with term "pancreatic" or stem "pancreat"
for index,result in enumerate(results):
outfile.write("<tr><td>" + str(index) + "</td><td>" + result.id + "</td><td>" + results.get_hit(index).highlight('title')[0] +"</td></tr>")
# if index == 999:
# break
#write string for finishing HTML document
outfile.write(end_string)
#close file connection
outfile.close()
#close connection to Xapian database
#searchConn.close()
|
telukir/PubMed2Go
|
full_text_index/search_near_title.py
|
Python
|
isc
| 3,208
|
import argparse
import docker
import logging
import os
import docket
logger = logging.getLogger('docket')
logging.basicConfig()
parser = argparse.ArgumentParser(description='')
parser.add_argument('-t', '--tag', dest='tag', help='tag for final image')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='verbose output', default=False)
parser.add_argument('--no-cache', dest='no_cache', action='store_true', help='Do not use cache when building the image', default=False)
parser.add_argument('buildpath', nargs='*')
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
tls_verify = os.environ.get('DOCKER_TLS_VERIFY', '0')
base_url = os.environ.get('DOCKER_HOST', 'tcp://127.0.0.1:2375')
base_url = base_url.replace('tcp:', 'https:')
tls_config = None
if cert_path:
tls_config = docker.tls.TLSConfig(verify=tls_verify,
client_cert=(os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem')
)
client = docker.Client(base_url=base_url, version='1.15', timeout=10, tls=tls_config)
tag = args.tag or None
buildpath = args.buildpath[0]
def main():
docket.build(client=client, tag=tag, buildpath=buildpath, no_cache=args.no_cache)
exit()
if __name__ == '__main__':
main()
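# Example invocation (illustrative; assumes the installed console script is
# named "docket"):
#     DOCKER_HOST=tcp://127.0.0.1:2375 docket -t myimage:latest ./build/context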
|
clarete/docket
|
docket/command_line.py
|
Python
|
mit
| 1,369
|
# Bug in 2.7 base64.py
_b32alphabet = {
0: 'A', 9: 'J', 18: 'S', 27: '3',
1: 'B', 10: 'K', 19: 'T', 28: '4',
2: 'C', 11: 'L', 20: 'U', 29: '5',
3: 'D', 12: 'M', 21: 'V', 30: '6',
4: 'E', 13: 'N', 22: 'W', 31: '7',
5: 'F', 14: 'O', 23: 'X',
6: 'G', 15: 'P', 24: 'Y',
7: 'H', 16: 'Q', 25: 'Z',
8: 'I', 17: 'R', 26: '2',
}
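# Illustration: reading the values in key order (0..31) yields the RFC 4648
# base32 alphabet:
#     ''.join(_b32alphabet[i] for i in range(32))
#     == 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'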
|
moagstar/python-uncompyle6
|
test/simple_source/expression/03_map.py
|
Python
|
mit
| 361
|
#! /usr/bin/env python
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
    # Instantiate one distribution object
dimension = 3
meanPoint = NumericalPoint(dimension, 1.0)
meanPoint[0] = 0.5
meanPoint[1] = -0.5
sigma = NumericalPoint(dimension, 1.0)
sigma[0] = 2.0
sigma[1] = 3.0
R = CorrelationMatrix(dimension)
for i in range(1, dimension):
R[i, i - 1] = 0.5
# Create a collection of distribution
aCollection = DistributionCollection()
aCollection.add(Normal(meanPoint, sigma, R))
meanPoint += NumericalPoint(meanPoint.getDimension(), 1.0)
aCollection.add(Normal(meanPoint, sigma, R))
meanPoint += NumericalPoint(meanPoint.getDimension(), 1.0)
aCollection.add(Normal(meanPoint, sigma, R))
    # Instantiate one distribution object
distribution = Mixture(
aCollection, NumericalPoint(aCollection.getSize(), 2.0))
print "Distribution ", repr(distribution)
print "Weights = ", repr(distribution.getWeights())
weights = distribution.getWeights()
weights[0] = 2.0 * weights[0]
distribution.setWeights(weights)
print "After update, new weights = ", repr(distribution.getWeights())
distribution = Mixture(aCollection)
print "Distribution ", repr(distribution)
# Is this distribution elliptical ?
print "Elliptical = ", distribution.isElliptical()
# Is this distribution continuous ?
print "Continuous = ", distribution.isContinuous()
# Test for realization of distribution
oneRealization = distribution.getRealization()
print "oneRealization=", repr(oneRealization)
# Test for sampling
size = 1000
oneSample = distribution.getSample(size)
print "oneSample first=", repr(oneSample[0]), " last=", repr(oneSample[size - 1])
print "mean=", repr(oneSample.computeMean())
print "covariance=", repr(oneSample.computeCovariance())
# Define a point
point = NumericalPoint(distribution.getDimension(), 1.0)
print "Point= ", repr(point)
# Show PDF and CDF of point
eps = 1e-5
    # derivative of the PDF with regard to its arguments
DDF = distribution.computeDDF(point)
print "ddf =", repr(DDF)
# by the finite difference technique
ddfFD = NumericalPoint(dimension)
for i in range(dimension):
left = NumericalPoint(point)
left[i] += eps
right = NumericalPoint(point)
right[i] -= eps
ddfFD[i] = (distribution.computePDF(left) -
distribution.computePDF(right)) / (2.0 * eps)
print "ddf (FD)=", repr(ddfFD)
# PDF value
LPDF = distribution.computeLogPDF(point)
print "log pdf=%.6f" % LPDF
PDF = distribution.computePDF(point)
print "pdf =%.6f" % PDF
# by the finite difference technique from CDF
if (dimension == 1):
print "pdf (FD)=%.6f" % ((distribution.computeCDF(point + NumericalPoint(1, eps)) - distribution.computeCDF(point + NumericalPoint(1, -eps))) / (2.0 * eps))
    # derivative of the PDF with regard to the parameters of the distribution
CDF = distribution.computeCDF(point)
print "cdf=%.6f" % CDF
CCDF = distribution.computeComplementaryCDF(point)
print "ccdf=%.6f" % CCDF
# PDFgr = distribution.computePDFGradient( point )
# print "pdf gradient =" , repr(PDFgr)
# by the finite difference technique
# PDFgrFD = NumericalPoint(4)
# PDFgrFD[0] = (Mixture(distribution.getR() + eps, distribution.getT(), distribution.getA(), distribution.getB()).computePDF(point) -
# Mixture(distribution.getR() - eps, distribution.getT(), distribution.getA(), distribution.getB()).computePDF(point)) / (2.0 * eps)
# PDFgrFD[1] = (Mixture(distribution.getR(), distribution.getT() + eps, distribution.getA(), distribution.getB()).computePDF(point) -
# Mixture(distribution.getR(), distribution.getT() - eps, distribution.getA(), distribution.getB()).computePDF(point)) / (2.0 * eps)
# PDFgrFD[2] = (Mixture(distribution.getR(), distribution.getT(), distribution.getA() + eps, distribution.getB()).computePDF(point) -
# Mixture(distribution.getR(), distribution.getT(), distribution.getA() - eps, distribution.getB()).computePDF(point)) / (2.0 * eps)
# PDFgrFD[3] = (Mixture(distribution.getR(), distribution.getT(), distribution.getA(), distribution.getB() + eps).computePDF(point) -
# Mixture(distribution.getR(), distribution.getT(), distribution.getA(), distribution.getB() - eps).computePDF(point)) / (2.0 * eps)
# print "pdf gradient (FD)=" , repr(PDFgrFD)
    # derivative of the CDF with regard to the parameters of the distribution
# CDFgr = distribution.computeCDFGradient( point )
# print "cdf gradient =" , repr(CDFgr)
# CDFgrFD = NumericalPoint(4)
# CDFgrFD[0] = (Mixture(distribution.getR() + eps, distribution.getT(), distribution.getA(), distribution.getB()).computeCDF(point) -
# Mixture(distribution.getR() - eps, distribution.getT(), distribution.getA(), distribution.getB()).computeCDF(point)) / (2.0 * eps)
# CDFgrFD[1] = (Mixture(distribution.getR(), distribution.getT() + eps, distribution.getA(), distribution.getB()).computeCDF(point) -
# Mixture(distribution.getR(), distribution.getT() - eps, distribution.getA(), distribution.getB()).computeCDF(point)) / (2.0 * eps)
# CDFgrFD[2] = (Mixture(distribution.getR(), distribution.getT(), distribution.getA() + eps, distribution.getB()).computeCDF(point) -
# Mixture(distribution.getR(), distribution.getT(), distribution.getA() - eps, distribution.getB()).computeCDF(point)) / (2.0 * eps)
# CDFgrFD[3] = (Mixture(distribution.getR(), distribution.getT(), distribution.getA(), distribution.getB() + eps).computeCDF(point) -
# Mixture(distribution.getR(), distribution.getT(), distribution.getA(), distribution.getB() - eps).computeCDF(point)) / (2.0 * eps)
# print "cdf gradient (FD)=", repr(CDFgrFD)
# quantile
quantile = distribution.computeQuantile(0.95)
print "quantile=", repr(quantile)
print "cdf(quantile)=%.6f" % distribution.computeCDF(quantile)
mean = distribution.getMean()
print "mean=", repr(mean)
covariance = distribution.getCovariance()
print "covariance=", repr(covariance)
parameters = distribution.getParametersCollection()
print "parameters=", repr(parameters)
for i in range(6):
print "standard moment n=", i, " value=", distribution.getStandardMoment(i)
print "Standard representative=", distribution.getStandardRepresentative()
# Constructor with separate weights. Also check small weights removal
weights = [1.0e-20, 2.5, 32.0]
atoms = DistributionCollection(
[Normal(1.0, 1.0), Normal(2.0, 2.0), Normal(3.0, 3.0)])
newMixture = Mixture(atoms, weights)
print "newMixture pdf= %.12g" % newMixture.computePDF(2.5)
print "atoms kept in mixture=", newMixture.getDistributionCollection()
print "newMixture=", newMixture
except:
import sys
print "t_Mixture_std.py", sys.exc_type, sys.exc_value
|
sofianehaddad/ot-svn
|
python/test/t_Mixture_std.py
|
Python
|
mit
| 7,198
|
doTimingAttackMitigation = False
import base64
import errno
import math
import time
import threading
import shared
import hashlib
import os
import select
import socket
import random
import ssl
from struct import unpack, pack
import sys
import traceback
from binascii import hexlify
#import string
#from subprocess import call # used when the API must execute an outside program
#from pyelliptic.openssl import OpenSSL
#import highlevelcrypto
from addresses import *
from class_objectHashHolder import objectHashHolder
from helper_generic import addDataPadding, isHostInPrivateIPRange
from helper_sql import sqlQuery
from debug import logger
# This thread is created either by the synSenderThread(for outgoing
# connections) or the singleListenerThread(for incoming connections).
class receiveDataThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self, name="receiveData")
self.data = ''
self.verackSent = False
self.verackReceived = False
def setup(
self,
sock,
HOST,
port,
streamNumber,
someObjectsOfWhichThisRemoteNodeIsAlreadyAware,
selfInitiatedConnections,
sendDataThreadQueue,
objectHashHolderInstance):
self.sock = sock
self.peer = shared.Peer(HOST, port)
self.name = "receiveData-" + self.peer.host.replace(":", ".") # ":" log parser field separator
self.streamNumber = streamNumber
self.objectsThatWeHaveYetToGetFromThisPeer = {}
self.selfInitiatedConnections = selfInitiatedConnections
self.sendDataThreadQueue = sendDataThreadQueue # used to send commands and data to the sendDataThread
shared.connectedHostsList[
self.peer.host] = 0 # The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that an outgoingSynSender thread doesn't try to connect to it.
self.connectionIsOrWasFullyEstablished = False # set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
self.services = 0
if self.streamNumber == -1: # This was an incoming connection. Send out a version message if we accept the other node's version message.
self.initiatedConnection = False
else:
self.initiatedConnection = True
self.selfInitiatedConnections[streamNumber][self] = 0
self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware = someObjectsOfWhichThisRemoteNodeIsAlreadyAware
self.objectHashHolderInstance = objectHashHolderInstance
self.startTime = time.time()
def run(self):
logger.debug('receiveDataThread starting. ID ' + str(id(self)) + '. The size of the shared.connectedHostsList is now ' + str(len(shared.connectedHostsList)))
while True:
if shared.config.getint('bitmessagesettings', 'maxdownloadrate') == 0:
downloadRateLimitBytes = float("inf")
else:
downloadRateLimitBytes = shared.config.getint('bitmessagesettings', 'maxdownloadrate') * 1000
with shared.receiveDataLock:
while shared.numberOfBytesReceivedLastSecond >= downloadRateLimitBytes:
if int(time.time()) == shared.lastTimeWeResetBytesReceived:
# If it's still the same second that it was last time then sleep.
time.sleep(0.3)
else:
# It's a new second. Let us clear the shared.numberOfBytesReceivedLastSecond.
shared.lastTimeWeResetBytesReceived = int(time.time())
shared.numberOfBytesReceivedLastSecond = 0
dataLen = len(self.data)
try:
if ((self.services & shared.NODE_SSL == shared.NODE_SSL) and
self.connectionIsOrWasFullyEstablished and
shared.haveSSL(not self.initiatedConnection)):
dataRecv = self.sslSock.recv(1024)
else:
dataRecv = self.sock.recv(1024)
self.data += dataRecv
shared.numberOfBytesReceived += len(dataRecv) # for the 'network status' UI tab. The UI clears this value whenever it updates.
shared.numberOfBytesReceivedLastSecond += len(dataRecv) # for the download rate limit
except socket.timeout:
                logger.error('Timeout occurred waiting for data from ' + str(self.peer) + '. Closing receiveData thread. (ID: ' + str(id(self)) + ')')
break
except Exception as err:
if (sys.platform == 'win32' and err.errno in ([2, 10035])) or (sys.platform != 'win32' and err.errno == errno.EWOULDBLOCK):
select.select([self.sslSock], [], [])
continue
logger.error('sock.recv error. Closing receiveData thread (' + str(self.peer) + ', Thread ID: ' + str(id(self)) + ').' + str(err.errno) + "/" + str(err))
break
# print 'Received', repr(self.data)
if len(self.data) == dataLen: # If self.sock.recv returned no data:
logger.debug('Connection to ' + str(self.peer) + ' closed. Closing receiveData thread. (ID: ' + str(id(self)) + ')')
break
else:
self.processData()
try:
del self.selfInitiatedConnections[self.streamNumber][self]
logger.debug('removed self (a receiveDataThread) from selfInitiatedConnections')
except:
pass
self.sendDataThreadQueue.put((0, 'shutdown','no data')) # commands the corresponding sendDataThread to shut itself down.
try:
del shared.connectedHostsList[self.peer.host]
except Exception as err:
logger.error('Could not delete ' + str(self.peer.host) + ' from shared.connectedHostsList.' + str(err))
try:
del shared.numberOfObjectsThatWeHaveYetToGetPerPeer[
self.peer]
except:
pass
shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
logger.debug('receiveDataThread ending. ID ' + str(id(self)) + '. The size of the shared.connectedHostsList is now ' + str(len(shared.connectedHostsList)))
def antiIntersectionDelay(self, initial = False):
# estimated time for a small object to propagate across the whole network
delay = math.ceil(math.log(len(shared.knownNodes[self.streamNumber]) + 2, 20)) * (0.2 + objectHashHolder.size/2)
# +2 is to avoid problems with log(0) and log(1)
# 20 is avg connected nodes count
# 0.2 is avg message transmission time
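        # Illustrative numbers (node count assumed): with 38 known nodes,
        # log(38 + 2, 20) ~= 1.23, so math.ceil(...) == 2 and the delay is
        # 2 * (0.2 + objectHashHolder.size / 2) seconds.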
now = time.time()
if initial and now - delay < self.startTime:
logger.debug("Initial sleeping for %.2fs", delay - (now - self.startTime))
time.sleep(delay - (now - self.startTime))
elif not initial:
logger.debug("Sleeping due to missing object for %.2fs", delay)
time.sleep(delay)
def processData(self):
if len(self.data) < shared.Header.size: # if so little of the data has arrived that we can't even read the checksum then wait for more data.
return
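        # Wire header layout (Bitmessage protocol): 4-byte magic
        # (0xE9BEB4D9), 12-byte NUL-padded command, 4-byte payload length,
        # 4-byte checksum (first four bytes of SHA-512 of the payload).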
magic,command,payloadLength,checksum = shared.Header.unpack(self.data[:shared.Header.size])
if magic != 0xE9BEB4D9:
self.data = ""
return
if payloadLength > 1600100: # ~1.6 MB which is the maximum possible size of an inv message.
            logger.info('The incoming message, which we have not yet downloaded, is too large. Ignoring it. (Unfortunately there is no way to tell the other node to stop sending it except to disconnect.) Message size: %s' % payloadLength)
self.data = self.data[payloadLength + shared.Header.size:]
del magic,command,payloadLength,checksum # we don't need these anymore and better to clean them now before the recursive call rather than after
self.processData()
return
if len(self.data) < payloadLength + shared.Header.size: # check if the whole message has arrived yet.
return
payload = self.data[shared.Header.size:payloadLength + shared.Header.size]
if checksum != hashlib.sha512(payload).digest()[0:4]: # test the checksum in the message.
logger.error('Checksum incorrect. Clearing this message.')
self.data = self.data[payloadLength + shared.Header.size:]
del magic,command,payloadLength,checksum,payload # better to clean up before the recursive call
self.processData()
return
# The time we've last seen this node is obviously right now since we
# just received valid data from it. So update the knownNodes list so
        # that other peers can be made aware of its existence.
if self.initiatedConnection and self.connectionIsOrWasFullyEstablished: # The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
with shared.knownNodesLock:
shared.knownNodes[self.streamNumber][self.peer] = int(time.time())
#Strip the nulls
command = command.rstrip('\x00')
logger.debug('remoteCommand ' + repr(command) + ' from ' + str(self.peer))
try:
#TODO: Use a dispatcher here
if command == 'error':
self.recerror(payload)
elif not self.connectionIsOrWasFullyEstablished:
if command == 'version':
self.recversion(payload)
elif command == 'verack':
self.recverack()
else:
if command == 'addr':
self.recaddr(payload)
elif command == 'inv':
self.recinv(payload)
elif command == 'getdata':
self.recgetdata(payload)
elif command == 'object':
self.recobject(payload)
elif command == 'ping':
self.sendpong(payload)
#elif command == 'pong':
# pass
except varintDecodeError as e:
logger.debug("There was a problem with a varint while processing a message from the wire. Some details: %s" % e)
except Exception as e:
logger.critical("Critical error in a receiveDataThread: \n%s" % traceback.format_exc())
del payload
self.data = self.data[payloadLength + shared.Header.size:] # take this message out and then process the next message
if self.data == '': # if there are no more messages
while len(self.objectsThatWeHaveYetToGetFromThisPeer) > 0:
shared.numberOfInventoryLookupsPerformed += 1
objectHash, = random.sample(
self.objectsThatWeHaveYetToGetFromThisPeer, 1)
if objectHash in shared.inventory:
logger.debug('Inventory already has object listed in inv message.')
del self.objectsThatWeHaveYetToGetFromThisPeer[objectHash]
else:
# We don't have the object in our inventory. Let's request it.
self.sendgetdata(objectHash)
del self.objectsThatWeHaveYetToGetFromThisPeer[
objectHash] # It is possible that the remote node might not respond with the object. In that case, we'll very likely get it from someone else anyway.
if len(self.objectsThatWeHaveYetToGetFromThisPeer) == 0:
logger.debug('(concerning' + str(self.peer) + ') number of objectsThatWeHaveYetToGetFromThisPeer is now 0')
try:
del shared.numberOfObjectsThatWeHaveYetToGetPerPeer[
self.peer] # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
except:
pass
break
if len(self.objectsThatWeHaveYetToGetFromThisPeer) == 0:
# We had objectsThatWeHaveYetToGetFromThisPeer but the loop ran, they were all in our inventory, and now we don't have any to get anymore.
logger.debug('(concerning' + str(self.peer) + ') number of objectsThatWeHaveYetToGetFromThisPeer is now 0')
try:
del shared.numberOfObjectsThatWeHaveYetToGetPerPeer[
self.peer] # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
except:
pass
if len(self.objectsThatWeHaveYetToGetFromThisPeer) > 0:
logger.debug('(concerning' + str(self.peer) + ') number of objectsThatWeHaveYetToGetFromThisPeer is now ' + str(len(self.objectsThatWeHaveYetToGetFromThisPeer)))
shared.numberOfObjectsThatWeHaveYetToGetPerPeer[self.peer] = len(
self.objectsThatWeHaveYetToGetFromThisPeer) # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
self.processData()
    def sendpong(self, payload):
logger.debug('Sending pong')
self.sendDataThreadQueue.put((0, 'sendRawData', shared.CreatePacket('pong')))
def recverack(self):
logger.debug('verack received')
self.verackReceived = True
if self.verackSent:
# We have thus both sent and received a verack.
self.connectionFullyEstablished()
def connectionFullyEstablished(self):
if self.connectionIsOrWasFullyEstablished:
# there is no reason to run this function a second time
return
self.connectionIsOrWasFullyEstablished = True
self.sslSock = self.sock
if ((self.services & shared.NODE_SSL == shared.NODE_SSL) and
shared.haveSSL(not self.initiatedConnection)):
logger.debug("Initialising TLS")
self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(shared.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(shared.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
if hasattr(self.sslSock, "context"):
self.sslSock.context.set_ecdh_curve("secp256k1")
while True:
try:
self.sslSock.do_handshake()
break
                except ssl.SSLError as e:
                    if e.errno == 2:  # SSL_ERROR_WANT_READ: wait until the socket is ready
                        select.select([self.sslSock], [self.sslSock], [])
else:
break
except:
break
# Command the corresponding sendDataThread to set its own connectionIsOrWasFullyEstablished variable to True also
self.sendDataThreadQueue.put((0, 'connectionIsOrWasFullyEstablished', (self.services, self.sslSock)))
if not self.initiatedConnection:
shared.clientHasReceivedIncomingConnections = True
shared.UISignalQueue.put(('setStatusIcon', 'green'))
        # We'll send out a pong every 5 minutes to make sure the connection
        # stays alive if there has been no other traffic to send lately.
        self.sock.settimeout(600)
shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
logger.debug('Connection fully established with ' + str(self.peer) + "\n" + \
'The size of the connectedHostsList is now ' + str(len(shared.connectedHostsList)) + "\n" + \
'The length of sendDataQueues is now: ' + str(len(shared.sendDataQueues)) + "\n" + \
'broadcasting addr from within connectionFullyEstablished function.')
# Let all of our peers know about this new node.
dataToSend = (int(time.time()), self.streamNumber, 1, self.peer.host, self.remoteNodeIncomingPort)
shared.broadcastToSendDataQueues((
self.streamNumber, 'advertisepeer', dataToSend))
self.sendaddr() # This is one large addr message to this one peer.
if not self.initiatedConnection and len(shared.connectedHostsList) > 200:
            logger.info('We are connected to too many people. Closing connection.')
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
return
self.sendBigInv()
def sendBigInv(self):
# Select all hashes for objects in this stream.
bigInvList = {}
for hash in shared.inventory.unexpired_hashes_by_stream(self.streamNumber):
if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware and not self.objectHashHolderInstance.hasHash(hash):
bigInvList[hash] = 0
numberOfObjectsInInvMessage = 0
payload = ''
# Now let us start appending all of these hashes together. They will be
# sent out in a big inv message to our new peer.
for hash, storedValue in bigInvList.items():
payload += hash
numberOfObjectsInInvMessage += 1
            # We can only send a maximum of 50000 items per inv message, but
            # we may have more objects to advertise; they must be split up
            # into multiple inv messages.
            if numberOfObjectsInInvMessage == 50000:
self.sendinvMessageToJustThisOnePeer(
numberOfObjectsInInvMessage, payload)
payload = ''
numberOfObjectsInInvMessage = 0
if numberOfObjectsInInvMessage > 0:
self.sendinvMessageToJustThisOnePeer(
numberOfObjectsInInvMessage, payload)
# Used to send a big inv message when the connection with a node is
# first fully established. Notice that there is also a broadcastinv
# function for broadcasting invs to everyone in our stream.
def sendinvMessageToJustThisOnePeer(self, numberOfObjects, payload):
payload = encodeVarint(numberOfObjects) + payload
logger.debug('Sending huge inv message with ' + str(numberOfObjects) + ' objects to just this one peer')
self.sendDataThreadQueue.put((0, 'sendRawData', shared.CreatePacket('inv', payload)))
def _sleepForTimingAttackMitigation(self, sleepTime):
# We don't need to do the timing attack mitigation if we are
# only connected to the trusted peer because we can trust the
# peer not to attack
        if sleepTime > 0 and doTimingAttackMitigation and shared.trustedPeer is None:
logger.debug('Timing attack mitigation: Sleeping for ' + str(sleepTime) + ' seconds.')
time.sleep(sleepTime)
def recerror(self, data):
"""
The remote node has been polite enough to send you an error message.
"""
fatalStatus, readPosition = decodeVarint(data[:10])
banTime, banTimeLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += banTimeLength
inventoryVectorLength, inventoryVectorLengthLength = decodeVarint(data[readPosition:readPosition+10])
if inventoryVectorLength > 100:
return
readPosition += inventoryVectorLengthLength
inventoryVector = data[readPosition:readPosition+inventoryVectorLength]
readPosition += inventoryVectorLength
errorTextLength, errorTextLengthLength = decodeVarint(data[readPosition:readPosition+10])
if errorTextLength > 1000:
return
readPosition += errorTextLengthLength
errorText = data[readPosition:readPosition+errorTextLength]
if fatalStatus == 0:
fatalHumanFriendly = 'Warning'
elif fatalStatus == 1:
fatalHumanFriendly = 'Error'
        elif fatalStatus == 2:
            fatalHumanFriendly = 'Fatal'
        else:
            fatalHumanFriendly = 'Unknown'
        message = '%s message received from %s: %s.' % (fatalHumanFriendly, self.peer, errorText)
if inventoryVector:
message += " This concerns object %s" % hexlify(inventoryVector)
if banTime > 0:
message += " Remote node says that the ban time is %s" % banTime
logger.error(message)
def recobject(self, data):
self.messageProcessingStartTime = time.time()
lengthOfTimeWeShouldUseToProcessThisMessage = shared.checkAndShareObjectWithPeers(data)
"""
Sleeping will help guarantee that we can process messages faster than a
remote node can send them. If we fall behind, the attacker could observe
        that we are slowing down the rate at which we request objects from the
network which would indicate that we own a particular address (whichever
one to which they are sending all of their attack messages). Note
that if an attacker connects to a target with many connections, this
mitigation mechanism might not be sufficient.
"""
sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time() - self.messageProcessingStartTime)
self._sleepForTimingAttackMitigation(sleepTime)
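    # Editor's note: an illustrative sketch (not part of the original module)
    # of the sleep-budget computation recobject relies on above: sleep only
    # for whatever remains of the per-message processing allowance, so the
    # total time spent per message is constant regardless of how long the
    # real work took.
    @staticmethod
    def _exampleComputeMitigationSleep(allowance, startTime):
        # Returns the number of seconds still to sleep; never negative.
        return max(0, allowance - (time.time() - startTime))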
# We have received an inv message
def recinv(self, data):
totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers = 0 # this counts duplicates separately because they take up memory
if len(shared.numberOfObjectsThatWeHaveYetToGetPerPeer) > 0:
for key, value in shared.numberOfObjectsThatWeHaveYetToGetPerPeer.items():
totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers += value
logger.debug('number of keys(hosts) in shared.numberOfObjectsThatWeHaveYetToGetPerPeer: ' + str(len(shared.numberOfObjectsThatWeHaveYetToGetPerPeer)) + "\n" + \
'totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers = ' + str(totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers))
numberOfItemsInInv, lengthOfVarint = decodeVarint(data[:10])
if numberOfItemsInInv > 50000:
sys.stderr.write('Too many items in inv message!')
return
if len(data) < lengthOfVarint + (numberOfItemsInInv * 32):
logger.info('inv message doesn\'t contain enough data. Ignoring.')
return
if numberOfItemsInInv == 1: # we'll just request this data from the person who advertised the object.
            if totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers > 200000 and len(self.objectsThatWeHaveYetToGetFromThisPeer) > 1000 and shared.trustedPeer is None: # inv flooding attack mitigation
logger.debug('We already have ' + str(totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers) + ' items yet to retrieve from peers and over 1000 from this node in particular. Ignoring this inv message.')
return
self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware[
data[lengthOfVarint:32 + lengthOfVarint]] = 0
shared.numberOfInventoryLookupsPerformed += 1
if data[lengthOfVarint:32 + lengthOfVarint] in shared.inventory:
logger.debug('Inventory has inventory item already.')
else:
self.sendgetdata(data[lengthOfVarint:32 + lengthOfVarint])
else:
# There are many items listed in this inv message. Let us create a
# 'set' of objects we are aware of and a set of objects in this inv
# message so that we can diff one from the other cheaply.
startTime = time.time()
advertisedSet = set()
for i in range(numberOfItemsInInv):
advertisedSet.add(data[lengthOfVarint + (32 * i):32 + lengthOfVarint + (32 * i)])
objectsNewToMe = advertisedSet - shared.inventory.hashes_by_stream(self.streamNumber)
logger.info('inv message lists %s objects. Of those %s are new to me. It took %s seconds to figure that out.', numberOfItemsInInv, len(objectsNewToMe), time.time()-startTime)
for item in objectsNewToMe:
                if totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers > 200000 and len(self.objectsThatWeHaveYetToGetFromThisPeer) > 1000 and shared.trustedPeer is None: # inv flooding attack mitigation
                    logger.debug('We already have ' + str(totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers) + ' items yet to retrieve from peers and over ' + str(len(self.objectsThatWeHaveYetToGetFromThisPeer)) + ' from this node in particular. Ignoring the rest of this inv message.')
break
                # Helps us keep from sending inv messages to peers that
                # already know about the objects listed therein.
                self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware[item] = 0
                # Upon finishing dealing with an incoming message, the
                # receiveDataThread will request a random object from this
                # peer out of this data structure. This way, if we get
                # multiple inv messages from multiple peers which list mostly
                # the same objects, we will make getdata requests for
                # different random objects from the various peers.
                self.objectsThatWeHaveYetToGetFromThisPeer[item] = 0
if len(self.objectsThatWeHaveYetToGetFromThisPeer) > 0:
shared.numberOfObjectsThatWeHaveYetToGetPerPeer[
self.peer] = len(self.objectsThatWeHaveYetToGetFromThisPeer)
# Send a getdata message to our peer to request the object with the given
# hash
def sendgetdata(self, hash):
logger.debug('sending getdata to retrieve object with hash: ' + hexlify(hash))
payload = '\x01' + hash
self.sendDataThreadQueue.put((0, 'sendRawData', shared.CreatePacket('getdata', payload)))
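    # Editor's note: an illustrative sketch (not part of the original module)
    # of the getdata payload layout used above: a varint object count followed
    # by the 32-byte hashes. The '\x01' literal in sendgetdata is simply the
    # one-byte varint encoding of a count of 1.
    @staticmethod
    def _exampleBuildGetdataPayload(hashes):
        payload = encodeVarint(len(hashes))
        for objectHash in hashes:
            assert len(objectHash) == 32  # inventory vectors are 32-byte hashes
            payload += objectHash
        return payload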
# We have received a getdata request from our peer
def recgetdata(self, data):
numberOfRequestedInventoryItems, lengthOfVarint = decodeVarint(
data[:10])
if len(data) < lengthOfVarint + (32 * numberOfRequestedInventoryItems):
logger.debug('getdata message does not contain enough data. Ignoring.')
return
self.antiIntersectionDelay(True) # only handle getdata requests if we have been connected long enough
for i in xrange(numberOfRequestedInventoryItems):
hash = data[lengthOfVarint + (
i * 32):32 + lengthOfVarint + (i * 32)]
            logger.debug('received getdata request for item: ' + hexlify(hash))
shared.numberOfInventoryLookupsPerformed += 1
shared.inventoryLock.acquire()
if self.objectHashHolderInstance.hasHash(hash):
shared.inventoryLock.release()
self.antiIntersectionDelay()
else:
shared.inventoryLock.release()
if hash in shared.inventory:
self.sendObject(shared.inventory[hash].payload)
else:
self.antiIntersectionDelay()
logger.warning('%s asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. We probably cleaned it out after advertising it but before they got around to asking for it.' % (self.peer,))
# Our peer has requested (in a getdata message) that we send an object.
def sendObject(self, payload):
logger.debug('sending an object.')
self.sendDataThreadQueue.put((0, 'sendRawData', shared.CreatePacket('object',payload)))
def _checkIPAddress(self, host):
if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
hostStandardFormat = socket.inet_ntop(socket.AF_INET, host[12:])
return self._checkIPv4Address(host[12:], hostStandardFormat)
elif host[0:6] == '\xfd\x87\xd8\x7e\xeb\x43':
# Onion, based on BMD/bitcoind
hostStandardFormat = base64.b32encode(host[6:]).lower() + ".onion"
return hostStandardFormat
else:
hostStandardFormat = socket.inet_ntop(socket.AF_INET6, host)
if hostStandardFormat == "":
# This can happen on Windows systems which are not 64-bit compatible
# so let us drop the IPv6 address.
return False
return self._checkIPv6Address(host, hostStandardFormat)
def _checkIPv4Address(self, host, hostStandardFormat):
if host[0] == '\x7F': # 127/8
logger.debug('Ignoring IP address in loopback range: ' + hostStandardFormat)
return False
if host[0] == '\x0A': # 10/8
logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
return False
if host[0:2] == '\xC0\xA8': # 192.168/16
logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
return False
if host[0:2] >= '\xAC\x10' and host[0:2] < '\xAC\x20': # 172.16/12
            logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
return False
return hostStandardFormat
def _checkIPv6Address(self, host, hostStandardFormat):
if host == ('\x00' * 15) + '\x01':
logger.debug('Ignoring loopback address: ' + hostStandardFormat)
return False
if host[0] == '\xFE' and (ord(host[1]) & 0xc0) == 0x80:
            logger.debug('Ignoring local address: ' + hostStandardFormat)
return False
if (ord(host[0]) & 0xfe) == 0xfc:
            logger.debug('Ignoring unique local address: ' + hostStandardFormat)
return False
return hostStandardFormat
# We have received an addr message.
def recaddr(self, data):
numberOfAddressesIncluded, lengthOfNumberOfAddresses = decodeVarint(
data[:10])
if shared.verbose >= 1:
logger.debug('addr message contains ' + str(numberOfAddressesIncluded) + ' IP addresses.')
if numberOfAddressesIncluded > 1000 or numberOfAddressesIncluded == 0:
return
if len(data) != lengthOfNumberOfAddresses + (38 * numberOfAddressesIncluded):
logger.debug('addr message does not contain the correct amount of data. Ignoring.')
return
for i in range(0, numberOfAddressesIncluded):
fullHost = data[20 + lengthOfNumberOfAddresses + (38 * i):36 + lengthOfNumberOfAddresses + (38 * i)]
recaddrStream, = unpack('>I', data[8 + lengthOfNumberOfAddresses + (
38 * i):12 + lengthOfNumberOfAddresses + (38 * i)])
if recaddrStream == 0:
continue
            # If the embedded stream number is not in my stream or either of
            # my child streams then ignore it. Someone might be trying funny
            # business.
            if recaddrStream != self.streamNumber and recaddrStream != (self.streamNumber * 2) and recaddrStream != ((self.streamNumber * 2) + 1):
continue
recaddrServices, = unpack('>Q', data[12 + lengthOfNumberOfAddresses + (
38 * i):20 + lengthOfNumberOfAddresses + (38 * i)])
recaddrPort, = unpack('>H', data[36 + lengthOfNumberOfAddresses + (
38 * i):38 + lengthOfNumberOfAddresses + (38 * i)])
hostStandardFormat = self._checkIPAddress(fullHost)
if hostStandardFormat is False:
continue
if recaddrPort == 0:
continue
timeSomeoneElseReceivedMessageFromThisNode, = unpack('>Q', data[lengthOfNumberOfAddresses + (
38 * i):8 + lengthOfNumberOfAddresses + (38 * i)]) # This is the 'time' value in the received addr message. 64-bit.
            # knownNodes is a dictionary of dictionaries with one outer
            # dictionary for each stream. If the outer stream dictionary
            # doesn't exist yet then we must make it.
            if recaddrStream not in shared.knownNodes:
with shared.knownNodesLock:
shared.knownNodes[recaddrStream] = {}
peerFromAddrMessage = shared.Peer(hostStandardFormat, recaddrPort)
if peerFromAddrMessage not in shared.knownNodes[recaddrStream]:
                # If we have more than 20000 nodes in our list already then
                # just forget about adding more. Also, make sure that the time
                # that someone else received a message from this node is
                # within three hours from now.
                if len(shared.knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time()) - 10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800):
with shared.knownNodesLock:
shared.knownNodes[recaddrStream][peerFromAddrMessage] = timeSomeoneElseReceivedMessageFromThisNode
logger.debug('added new node ' + str(peerFromAddrMessage) + ' to knownNodes in stream ' + str(recaddrStream))
shared.needToWriteKnownNodesToDisk = True
hostDetails = (
timeSomeoneElseReceivedMessageFromThisNode,
recaddrStream, recaddrServices, hostStandardFormat, recaddrPort)
shared.broadcastToSendDataQueues((
self.streamNumber, 'advertisepeer', hostDetails))
else:
timeLastReceivedMessageFromThisNode = shared.knownNodes[recaddrStream][
peerFromAddrMessage]
if (timeLastReceivedMessageFromThisNode < timeSomeoneElseReceivedMessageFromThisNode) and (timeSomeoneElseReceivedMessageFromThisNode < int(time.time())+900): # 900 seconds for wiggle-room in case other nodes' clocks aren't quite right.
with shared.knownNodesLock:
shared.knownNodes[recaddrStream][peerFromAddrMessage] = timeSomeoneElseReceivedMessageFromThisNode
logger.debug('knownNodes currently has ' + str(len(shared.knownNodes[self.streamNumber])) + ' nodes for this stream.')
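    # Editor's note: an illustrative sketch (not part of the original module)
    # of the fixed 38-byte addr record layout that recaddr indexes into above:
    # an 8-byte time, 4-byte stream, 8-byte services, 16-byte host and 2-byte
    # port, all big-endian.
    @staticmethod
    def _exampleParseAddrRecord(record):
        assert len(record) == 38
        addrTime, stream, services = unpack('>QIQ', record[:20])
        host = record[20:36]  # 16 bytes: IPv6, or IPv4 mapped into IPv6
        port, = unpack('>H', record[36:38])
        return addrTime, stream, services, host, port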
# Send a huge addr message to our peer. This is only used
# when we fully establish a connection with a
# peer (with the full exchange of version and verack
# messages).
def sendaddr(self):
addrsInMyStream = {}
addrsInChildStreamLeft = {}
addrsInChildStreamRight = {}
# print 'knownNodes', shared.knownNodes
# We are going to share a maximum number of 1000 addrs with our peer.
# 500 from this stream, 250 from the left child stream, and 250 from
# the right child stream.
with shared.knownNodesLock:
if len(shared.knownNodes[self.streamNumber]) > 0:
ownPosition = random.randint(0, 499)
sentOwn = False
for i in range(500):
                    # If the current connection is over a proxy, send our own
                    # onion address at a random position.
                    if ownPosition == i and ".onion" in shared.config.get("bitmessagesettings", "onionhostname") and \
                            hasattr(self.sock, "getproxytype") and self.sock.getproxytype() != "none" and not sentOwn:
peer = shared.Peer(shared.config.get("bitmessagesettings", "onionhostname"), shared.config.getint("bitmessagesettings", "onionport"))
else:
# still may contain own onion address, but we don't change it
peer, = random.sample(shared.knownNodes[self.streamNumber], 1)
if isHostInPrivateIPRange(peer.host):
continue
                    if peer.host == shared.config.get("bitmessagesettings", "onionhostname") and peer.port == shared.config.getint("bitmessagesettings", "onionport"):
sentOwn = True
addrsInMyStream[peer] = shared.knownNodes[
self.streamNumber][peer]
if len(shared.knownNodes[self.streamNumber * 2]) > 0:
for i in range(250):
peer, = random.sample(shared.knownNodes[
self.streamNumber * 2], 1)
if isHostInPrivateIPRange(peer.host):
continue
addrsInChildStreamLeft[peer] = shared.knownNodes[
self.streamNumber * 2][peer]
if len(shared.knownNodes[(self.streamNumber * 2) + 1]) > 0:
for i in range(250):
peer, = random.sample(shared.knownNodes[
(self.streamNumber * 2) + 1], 1)
if isHostInPrivateIPRange(peer.host):
continue
addrsInChildStreamRight[peer] = shared.knownNodes[
(self.streamNumber * 2) + 1][peer]
numberOfAddressesInAddrMessage = 0
payload = ''
# print 'addrsInMyStream.items()', addrsInMyStream.items()
for (HOST, PORT), value in addrsInMyStream.items():
timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers): # If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack(
'>Q', timeLastReceivedMessageFromThisNode) # 64-bit time
payload += pack('>I', self.streamNumber)
payload += pack(
'>q', 1) # service bit flags offered by this node
payload += shared.encodeHost(HOST)
payload += pack('>H', PORT) # remote port
for (HOST, PORT), value in addrsInChildStreamLeft.items():
timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers): # If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack(
'>Q', timeLastReceivedMessageFromThisNode) # 64-bit time
payload += pack('>I', self.streamNumber * 2)
payload += pack(
'>q', 1) # service bit flags offered by this node
payload += shared.encodeHost(HOST)
payload += pack('>H', PORT) # remote port
for (HOST, PORT), value in addrsInChildStreamRight.items():
timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers): # If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack(
'>Q', timeLastReceivedMessageFromThisNode) # 64-bit time
payload += pack('>I', (self.streamNumber * 2) + 1)
payload += pack(
'>q', 1) # service bit flags offered by this node
payload += shared.encodeHost(HOST)
payload += pack('>H', PORT) # remote port
payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
self.sendDataThreadQueue.put((0, 'sendRawData', shared.CreatePacket('addr', payload)))
# We have received a version message
def recversion(self, data):
if len(data) < 83:
# This version message is unreasonably short. Forget it.
return
if self.verackSent:
"""
We must have already processed the remote node's version message.
There might be a time in the future when we Do want to process
a new version message, like if the remote node wants to update
the streams in which they are interested. But for now we'll
ignore this version message
"""
return
self.remoteProtocolVersion, = unpack('>L', data[:4])
self.services, = unpack('>q', data[4:12])
if self.remoteProtocolVersion < 3:
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
            logger.debug('Closing connection to old protocol version ' + str(self.remoteProtocolVersion) + ' node: ' + str(self.peer))
return
timestamp, = unpack('>Q', data[12:20])
timeOffset = timestamp - int(time.time())
if timeOffset > 3600:
self.sendDataThreadQueue.put((0, 'sendRawData', shared.assembleErrorMessage(fatal=2, errorText="Your time is too far in the future compared to mine. Closing connection.")))
logger.info("%s's time is too far in the future (%s seconds). Closing connection to it." % (self.peer, timeOffset))
time.sleep(2)
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
return
if timeOffset < -3600:
self.sendDataThreadQueue.put((0, 'sendRawData', shared.assembleErrorMessage(fatal=2, errorText="Your time is too far in the past compared to mine. Closing connection.")))
logger.info("%s's time is too far in the past (timeOffset %s seconds). Closing connection to it." % (self.peer, timeOffset))
time.sleep(2)
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
return
self.myExternalIP = socket.inet_ntoa(data[40:44])
# print 'myExternalIP', self.myExternalIP
self.remoteNodeIncomingPort, = unpack('>H', data[70:72])
# print 'remoteNodeIncomingPort', self.remoteNodeIncomingPort
useragentLength, lengthOfUseragentVarint = decodeVarint(
data[80:84])
readPosition = 80 + lengthOfUseragentVarint
useragent = data[readPosition:readPosition + useragentLength]
# version check
try:
            userAgentName, userAgentVersion = useragent[1:-1].split(":", 1)
except:
userAgentName = useragent
userAgentVersion = "0.0.0"
if userAgentName == "PyBitmessage":
myVersion = [int(n) for n in shared.softwareVersion.split(".")]
try:
remoteVersion = [int(n) for n in userAgentVersion.split(".")]
except:
                remoteVersion = [0, 0, 0]
# remote is newer, but do not cross between stable and unstable
try:
if cmp(remoteVersion, myVersion) > 0 and \
(myVersion[1] % 2 == remoteVersion[1] % 2):
shared.UISignalQueue.put(('newVersionAvailable', remoteVersion))
except:
pass
readPosition += useragentLength
numberOfStreamsInVersionMessage, lengthOfNumberOfStreamsInVersionMessage = decodeVarint(
data[readPosition:])
readPosition += lengthOfNumberOfStreamsInVersionMessage
self.streamNumber, lengthOfRemoteStreamNumber = decodeVarint(
data[readPosition:])
        logger.debug('Remote node useragent: ' + useragent + ' stream number: ' + str(self.streamNumber) + ' time offset: ' + str(timeOffset) + ' seconds.')
if self.streamNumber != 1:
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
            logger.debug('Closed connection to ' + str(self.peer) + ' because they are interested in stream ' + str(self.streamNumber) + '.')
return
        # We use this data structure not only to keep track of what hosts we
        # are connected to (so that we don't try to connect to them again),
        # but also to list the connection count on the Network Status tab.
        shared.connectedHostsList[self.peer.host] = 1
# If this was an incoming connection, then the sendDataThread
# doesn't know the stream. We have to set it.
if not self.initiatedConnection:
self.sendDataThreadQueue.put((0, 'setStreamNumber', self.streamNumber))
if data[72:80] == shared.eightBytesOfRandomDataUsedToDetectConnectionsToSelf:
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
logger.debug('Closing connection to myself: ' + str(self.peer))
return
# The other peer's protocol version is of interest to the sendDataThread but we learn of it
# in this version message. Let us inform the sendDataThread.
self.sendDataThreadQueue.put((0, 'setRemoteProtocolVersion', self.remoteProtocolVersion))
if not isHostInPrivateIPRange(self.peer.host):
with shared.knownNodesLock:
shared.knownNodes[self.streamNumber][shared.Peer(self.peer.host, self.remoteNodeIncomingPort)] = int(time.time())
if not self.initiatedConnection:
shared.knownNodes[self.streamNumber][shared.Peer(self.peer.host, self.remoteNodeIncomingPort)] -= 162000 # penalise inbound, 2 days minus 3 hours
shared.needToWriteKnownNodesToDisk = True
self.sendverack()
        if not self.initiatedConnection:
self.sendversion()
# Sends a version message
def sendversion(self):
logger.debug('Sending version message')
self.sendDataThreadQueue.put((0, 'sendRawData', shared.assembleVersionMessage(
self.peer.host, self.peer.port, self.streamNumber, not self.initiatedConnection)))
# Sends a verack message
def sendverack(self):
logger.debug('Sending verack')
self.sendDataThreadQueue.put((0, 'sendRawData', shared.CreatePacket('verack')))
self.verackSent = True
if self.verackReceived:
self.connectionFullyEstablished()
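# Editor's note: a minimal stand-alone sketch of the big-endian varint
# encoding that the message handlers above lean on via encodeVarint and
# decodeVarint (the real implementations are imported near the top of this
# file): values below 0xfd fit in one byte; larger values get a one-byte
# prefix followed by a 2-, 4- or 8-byte big-endian integer.
def _exampleEncodeVarint(n):
    if n < 0xfd:
        return pack('>B', n)
    if n <= 0xffff:
        return '\xfd' + pack('>H', n)
    if n <= 0xffffffff:
        return '\xfe' + pack('>I', n)
    return '\xff' + pack('>Q', n)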
|
timothyparez/PyBitmessage
|
src/class_receiveDataThread.py
|
Python
|
mit
| 46,068
|
#!/usr/bin/env python
from iris_sdk.models.maps.base_map import BaseMap
class CitiesMap(BaseMap):
result_count = None
cities = None
|
scottbarstow/iris-python
|
iris_sdk/models/maps/cities.py
|
Python
|
mit
| 142
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class P2PFingerprintTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
# Build a chain of blocks on top of given one
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
blocks = []
for _ in range(nblocks):
coinbase = create_coinbase(prev_height + 1)
block_time = prev_median_time + 1
block = create_block(int(prev_hash, 16), coinbase, block_time)
block.solve()
blocks.append(block)
prev_hash = block.hash
prev_height += 1
prev_median_time = block_time
return blocks
# Send a getdata request for a given block hash
def send_block_request(self, block_hash, node):
msg = msg_getdata()
msg.inv.append(CInv(2, block_hash)) # 2 == "Block"
node.send_message(msg)
# Send a getheaders request for a given single block hash
def send_header_request(self, block_hash, node):
msg = msg_getheaders()
msg.hashstop = block_hash
node.send_message(msg)
# Check whether last block received from node has a given hash
def last_block_equals(self, expected_hash, node):
block_msg = node.last_message.get("block")
return block_msg and block_msg.block.rehash() == expected_hash
# Check whether last block header received from node has a given hash
def last_header_equals(self, expected_hash, node):
headers_msg = node.last_message.get("headers")
return (headers_msg and
headers_msg.headers and
headers_msg.headers[0].rehash() == expected_hash)
# Checks that stale blocks timestamped more than a month ago are not served
# by the node while recent stale blocks and old active chain blocks are.
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
# Set node time to 60 days ago
self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
        # Generate a chain of 10 blocks
block_hashes = self.nodes[0].generate(nblocks=10)
# Create longer chain starting 2 blocks before current tip
height = len(block_hashes) - 2
block_hash = block_hashes[height - 1]
block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
new_blocks = self.build_chain(5, block_hash, height, block_time)
# Force reorg to a longer chain
node0.send_message(msg_headers(new_blocks))
node0.wait_for_getdata()
for block in new_blocks:
node0.send_and_ping(msg_block(block))
# Check that reorg succeeded
assert_equal(self.nodes[0].getblockcount(), 13)
stale_hash = int(block_hashes[-1], 16)
# Check that getdata request for stale block succeeds
self.send_block_request(stale_hash, node0)
test_function = lambda: self.last_block_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
test_function = lambda: self.last_header_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
tip = self.nodes[0].generate(nblocks=1)[0]
assert_equal(self.nodes[0].getblockcount(), 14)
# Send getdata & getheaders to refresh last received getheader message
block_hash = int(tip, 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
# Request for very old stale block should now fail
self.send_block_request(stale_hash, node0)
time.sleep(3)
assert not self.last_block_equals(stale_hash, node0)
# Request for very old stale block header should now fail
self.send_header_request(stale_hash, node0)
time.sleep(3)
assert not self.last_header_equals(stale_hash, node0)
# Verify we can fetch very old blocks and headers on the active chain
block_hash = int(block_hashes[2], 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
self.send_block_request(block_hash, node0)
test_function = lambda: self.last_block_equals(block_hash, node0)
wait_until(test_function, timeout=3)
self.send_header_request(block_hash, node0)
test_function = lambda: self.last_header_equals(block_hash, node0)
wait_until(test_function, timeout=3)
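    # Editor's note: an illustrative stand-alone equivalent of the
    # wait_until() helper used above (the real one lives in
    # test_framework.util): poll a predicate until it returns True or the
    # timeout expires.
    @staticmethod
    def _example_wait_until(predicate, timeout=3, poll_interval=0.05):
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(poll_interval)
        raise AssertionError("Predicate not satisfied within %s seconds" % timeout)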
if __name__ == '__main__':
P2PFingerprintTest().main()
|
Flowdalic/bitcoin
|
test/functional/p2p_fingerprint.py
|
Python
|
mit
| 5,771
|
# -*- coding: utf-8 -*-
import unittest
from wechatpy.replies import TextReply, create_reply
class CreateReplyTestCase(unittest.TestCase):
def test_create_reply_with_text_not_render(self):
text = "test"
reply = create_reply(text, render=False)
self.assertEqual("text", reply.type)
self.assertEqual(text, reply.content)
reply.render()
def test_create_reply_with_text_render(self):
text = "test"
reply = create_reply(text, render=True)
self.assertTrue(isinstance(reply, str))
def test_create_reply_with_message(self):
from wechatpy.messages import TextMessage
msg = TextMessage(
{
"FromUserName": "user1",
"ToUserName": "user2",
}
)
reply = create_reply("test", msg, render=False)
self.assertEqual("user1", reply.target)
self.assertEqual("user2", reply.source)
reply.render()
def test_create_reply_with_reply(self):
_reply = TextReply(content="test")
reply = create_reply(_reply, render=False)
self.assertEqual(_reply, reply)
reply.render()
def test_create_reply_with_articles(self):
articles = [
{
"title": "test 1",
"description": "test 1",
"image": "http://www.qq.com/1.png",
"url": "http://www.qq.com/1",
},
{
"title": "test 2",
"description": "test 2",
"image": "http://www.qq.com/2.png",
"url": "http://www.qq.com/2",
},
{
"title": "test 3",
"description": "test 3",
"image": "http://www.qq.com/3.png",
"url": "http://www.qq.com/3",
},
]
reply = create_reply(articles, render=False)
self.assertEqual("news", reply.type)
reply.render()
def test_create_reply_with_more_than_ten_articles(self):
articles = [
{
"title": "test 1",
"description": "test 1",
"image": "http://www.qq.com/1.png",
"url": "http://www.qq.com/1",
},
{
"title": "test 2",
"description": "test 2",
"image": "http://www.qq.com/2.png",
"url": "http://www.qq.com/2",
},
{
"title": "test 3",
"description": "test 3",
"image": "http://www.qq.com/3.png",
"url": "http://www.qq.com/3",
},
{
"title": "test 4",
"description": "test 4",
"image": "http://www.qq.com/4.png",
"url": "http://www.qq.com/4",
},
{
"title": "test 5",
"description": "test 5",
"image": "http://www.qq.com/5.png",
"url": "http://www.qq.com/5",
},
{
"title": "test 6",
"description": "test 6",
"image": "http://www.qq.com/6.png",
"url": "http://www.qq.com/6",
},
{
"title": "test 7",
"description": "test 7",
"image": "http://www.qq.com/7.png",
"url": "http://www.qq.com/7",
},
{
"title": "test 8",
"description": "test 8",
"image": "http://www.qq.com/8.png",
"url": "http://www.qq.com/8",
},
{
"title": "test 9",
"description": "test 9",
"image": "http://www.qq.com/9.png",
"url": "http://www.qq.com/9",
},
{
"title": "test 10",
"description": "test 10",
"image": "http://www.qq.com/10.png",
"url": "http://www.qq.com/10",
},
{
"title": "test 11",
"description": "test 11",
"image": "http://www.qq.com/11.png",
"url": "http://www.qq.com/11",
},
]
self.assertRaises(AttributeError, create_reply, articles)
def test_create_empty_reply(self):
from wechatpy.replies import EmptyReply
reply = create_reply("")
self.assertTrue(isinstance(reply, EmptyReply))
reply = create_reply(None)
self.assertTrue(isinstance(reply, EmptyReply))
reply = create_reply(False)
self.assertTrue(isinstance(reply, EmptyReply))
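# Editor's note: a small usage sketch of the API exercised above (assumes
# wechatpy is importable); render() serialises a reply object into the XML
# payload that the WeChat server expects.
def _example_render_text_reply():
    reply = create_reply("hello", render=False)
    return reply.render()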
|
jxtech/wechatpy
|
tests/test_create_reply.py
|
Python
|
mit
| 4,741
|
"""
tests specific to "pip install --user"
"""
import os
import textwrap
from os.path import curdir, isdir, isfile
import pytest
from pip._internal.compat import cache_from_source, uses_pycache
from tests.lib import pyversion
from tests.lib.local_repos import local_checkout
def _patch_dist_in_site_packages(script):
sitecustomize_path = script.lib_path.join("sitecustomize.py")
sitecustomize_path.write(textwrap.dedent("""
def dist_in_site_packages(dist):
return False
from pip._internal.req import req_install
req_install.dist_in_site_packages = dist_in_site_packages
"""))
# Caught py32 with an outdated __pycache__ file after a sitecustomize
# update (after python should have updated it) so will delete the cache
# file to be sure
# See: https://github.com/pypa/pip/pull/893#issuecomment-16426701
if uses_pycache:
cache_path = cache_from_source(sitecustomize_path)
if os.path.isfile(cache_path):
os.remove(cache_path)
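# Editor's note: an illustrative sketch of what cache_from_source (imported
# above from pip._internal.compat) does on Python 3, using the stdlib
# equivalent: it maps a source path to its __pycache__ byte-code path, which
# is why the stale sitecustomize cache above has to be removed by path.
def _example_pycache_path(source_path):
    import importlib.util  # Python 3 only; stand-in for the compat import above
    return importlib.util.cache_from_source(source_path)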
class Tests_UserSite:
@pytest.mark.network
def test_reset_env_system_site_packages_usersite(self, script, virtualenv):
"""
reset_env(system_site_packages=True) produces env where a --user
install can be found using pkg_resources
"""
virtualenv.system_site_packages = True
script.pip('install', '--user', 'INITools==0.2')
result = script.run(
'python', '-c',
"import pkg_resources; print(pkg_resources.get_distribution"
"('initools').project_name)",
)
project_name = result.stdout.strip()
assert 'INITools' == project_name, project_name
@pytest.mark.network
def test_install_subversion_usersite_editable_with_distribute(
self, script, virtualenv, tmpdir):
"""
Test installing current directory ('.') into usersite after installing
distribute
"""
virtualenv.system_site_packages = True
result = script.pip(
'install', '--user', '-e',
'%s#egg=initools' %
local_checkout(
'svn+http://svn.colorstudy.com/INITools/trunk',
tmpdir.join("cache"),
)
)
result.assert_installed('INITools', use_user_site=True)
@pytest.mark.network
def test_install_from_current_directory_into_usersite(
self, script, virtualenv, data, common_wheels):
"""
Test installing current directory ('.') into usersite
"""
virtualenv.system_site_packages = True
script.pip("install", "wheel", '--no-index', '-f', common_wheels)
run_from = data.packages.join("FSPkg")
result = script.pip(
'install', '-vvv', '--user', curdir,
cwd=run_from,
expect_error=False,
)
fspkg_folder = script.user_site / 'fspkg'
assert fspkg_folder in result.files_created, result.stdout
dist_info_folder = (
script.user_site / 'FSPkg-0.1.dev0.dist-info'
)
assert dist_info_folder in result.files_created
def test_install_user_venv_nositepkgs_fails(self, script, data):
"""
user install in virtualenv (with no system packages) fails with message
"""
run_from = data.packages.join("FSPkg")
result = script.pip(
'install', '--user', curdir,
cwd=run_from,
expect_error=True,
)
assert (
"Can not perform a '--user' install. User site-packages are not "
"visible in this virtualenv." in result.stderr
)
@pytest.mark.network
def test_install_user_conflict_in_usersite(self, script, virtualenv):
"""
Test user install with conflict in usersite updates usersite.
"""
virtualenv.system_site_packages = True
script.pip('install', '--user', 'INITools==0.3', '--no-binary=:all:')
result2 = script.pip(
'install', '--user', 'INITools==0.1', '--no-binary=:all:')
# usersite has 0.1
egg_info_folder = (
script.user_site / 'INITools-0.1-py%s.egg-info' % pyversion
)
initools_v3_file = (
# file only in 0.3
script.base_path / script.user_site / 'initools' /
'configparser.py'
)
assert egg_info_folder in result2.files_created, str(result2)
assert not isfile(initools_v3_file), initools_v3_file
@pytest.mark.network
def test_install_user_conflict_in_globalsite(self, script, virtualenv):
"""
Test user install with conflict in global site ignores site and
installs to usersite
"""
        # The test framework only supports testing using virtualenvs.
        # The sys.path ordering for virtualenvs with --system-site-packages
        # is: virtualenv-site, user-site, global-site.
        # This test will use 2 modifications to simulate the
        # user-site/global-site relationship:
        # 1) a monkey patch which will make it appear INITools==0.2 is not in
        #    the virtualenv site. If we don't patch this, pip will return an
        #    installation error: "Will not install to the usersite because it
        #    will lack sys.path precedence..."
        # 2) adding usersite to PYTHONPATH, so usersite has sys.path
        #    precedence over the virtualenv site.
virtualenv.system_site_packages = True
script.environ["PYTHONPATH"] = script.base_path / script.user_site
_patch_dist_in_site_packages(script)
script.pip('install', 'INITools==0.2', '--no-binary=:all:')
result2 = script.pip(
'install', '--user', 'INITools==0.1', '--no-binary=:all:')
# usersite has 0.1
egg_info_folder = (
script.user_site / 'INITools-0.1-py%s.egg-info' % pyversion
)
initools_folder = script.user_site / 'initools'
assert egg_info_folder in result2.files_created, str(result2)
assert initools_folder in result2.files_created, str(result2)
# site still has 0.2 (can't look in result1; have to check)
egg_info_folder = (
script.base_path / script.site_packages /
'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.base_path / script.site_packages / 'initools'
assert isdir(egg_info_folder)
assert isdir(initools_folder)
@pytest.mark.network
def test_upgrade_user_conflict_in_globalsite(self, script, virtualenv):
"""
Test user install/upgrade with conflict in global site ignores site and
installs to usersite
"""
        # The test framework only supports testing using virtualenvs.
        # The sys.path ordering for virtualenvs with --system-site-packages
        # is: virtualenv-site, user-site, global-site.
        # This test will use 2 modifications to simulate the
        # user-site/global-site relationship:
        # 1) a monkey patch which will make it appear INITools==0.2 is not in
        #    the virtualenv site. If we don't patch this, pip will return an
        #    installation error: "Will not install to the usersite because it
        #    will lack sys.path precedence..."
        # 2) adding usersite to PYTHONPATH, so usersite has sys.path
        #    precedence over the virtualenv site.
virtualenv.system_site_packages = True
script.environ["PYTHONPATH"] = script.base_path / script.user_site
_patch_dist_in_site_packages(script)
script.pip('install', 'INITools==0.2', '--no-binary=:all:')
result2 = script.pip(
'install', '--user', '--upgrade', 'INITools', '--no-binary=:all:')
# usersite has 0.3.1
egg_info_folder = (
script.user_site / 'INITools-0.3.1-py%s.egg-info' % pyversion
)
initools_folder = script.user_site / 'initools'
assert egg_info_folder in result2.files_created, str(result2)
assert initools_folder in result2.files_created, str(result2)
# site still has 0.2 (can't look in result1; have to check)
egg_info_folder = (
script.base_path / script.site_packages /
'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.base_path / script.site_packages / 'initools'
assert isdir(egg_info_folder), result2.stdout
assert isdir(initools_folder)
@pytest.mark.network
def test_install_user_conflict_in_globalsite_and_usersite(
self, script, virtualenv):
"""
Test user install with conflict in globalsite and usersite ignores
global site and updates usersite.
"""
        # The test framework only supports testing using virtualenvs.
        # The sys.path ordering for virtualenvs with --system-site-packages
        # is: virtualenv-site, user-site, global-site.
        # This test will use 2 modifications to simulate the
        # user-site/global-site relationship:
        # 1) a monkey patch which will make it appear INITools==0.2 is not in
        #    the virtualenv site. If we don't patch this, pip will return an
        #    installation error: "Will not install to the usersite because it
        #    will lack sys.path precedence..."
        # 2) adding usersite to PYTHONPATH, so usersite has sys.path
        #    precedence over the virtualenv site.
virtualenv.system_site_packages = True
script.environ["PYTHONPATH"] = script.base_path / script.user_site
_patch_dist_in_site_packages(script)
script.pip('install', 'INITools==0.2', '--no-binary=:all:')
script.pip('install', '--user', 'INITools==0.3', '--no-binary=:all:')
result3 = script.pip(
'install', '--user', 'INITools==0.1', '--no-binary=:all:')
# usersite has 0.1
egg_info_folder = (
script.user_site / 'INITools-0.1-py%s.egg-info' % pyversion
)
initools_v3_file = (
# file only in 0.3
script.base_path / script.user_site / 'initools' /
'configparser.py'
)
assert egg_info_folder in result3.files_created, str(result3)
assert not isfile(initools_v3_file), initools_v3_file
# site still has 0.2 (can't just look in result1; have to check)
egg_info_folder = (
script.base_path / script.site_packages /
'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.base_path / script.site_packages / 'initools'
assert isdir(egg_info_folder)
assert isdir(initools_folder)
@pytest.mark.network
def test_install_user_in_global_virtualenv_with_conflict_fails(
self, script, virtualenv):
"""
Test user install in --system-site-packages virtualenv with conflict in
site fails.
"""
virtualenv.system_site_packages = True
script.pip('install', 'INITools==0.2')
result2 = script.pip(
'install', '--user', 'INITools==0.1',
expect_error=True,
)
resultp = script.run(
'python', '-c',
"import pkg_resources; print(pkg_resources.get_distribution"
"('initools').location)",
)
dist_location = resultp.stdout.strip()
assert (
"Will not install to the user site because it will lack sys.path "
"precedence to %s in %s" %
('INITools', dist_location) in result2.stderr
)
|
zvezdan/pip
|
tests/functional/test_install_user.py
|
Python
|
mit
| 11,608
|
from flask_restplus import Namespace, Resource
from flask import current_app
from cea.glossary import read_glossary_df
api = Namespace('Glossary', description='Glossary for variables used in CEA')
@api.route('/')
class Glossary(Resource):
def get(self):
glossary = read_glossary_df(plugins=current_app.cea_config.plugins)
groups = glossary.groupby('SCRIPT')
data = []
for group in groups.groups:
df = groups.get_group(group)
result = df[~df.index.duplicated(keep='first')].fillna('-')
data.append({'script': group if group != '-' else 'inputs', 'variables': result.to_dict(orient='records')})
return data
|
architecture-building-systems/CEAforArcGIS
|
cea/interfaces/dashboard/api/glossary.py
|
Python
|
mit
| 692
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Landing pages administration module
===============================================
.. module:: landingpages.admin
:platform: Django
:synopsis: Landing pages administration module
.. moduleauthor:: (C) 2015 Oliver Gutiérrez
"""
# Django imports
from django.contrib import admin
# Site tools imports
from sitetools.admin import BaseModelAdmin
# Application imports
from landingpages.models import LandingPage
class LandingPageAdmin(BaseModelAdmin):
"""
Landing page administration class
"""
    list_display = ('name', 'url', 'language', 'template',)
    list_filter = ('language', 'template',)
    search_fields = ('name', 'title', 'keywords',)
# Register models
admin.site.register(LandingPage,LandingPageAdmin)
|
R3v1L/django-landingpages
|
landingpages/admin.py
|
Python
|
mit
| 813
|
#!/usr/bin/env python3
## Copyright (c) 2011 Steven D'Aprano.
## See the file __init__.py for the licence terms for this software.
"""
General utilities used by the stats package.
"""
__all__ = ['add_partial', 'coroutine', 'minmax']
import collections
import functools
import itertools
import math
# === Exceptions ===
class StatsError(ValueError):
pass
# === Helper functions ===
def sorted_data(func):
"""Decorator to sort data passed to stats functions."""
@functools.wraps(func)
def inner(data, *args, **kwargs):
data = sorted(data)
return func(data, *args, **kwargs)
return inner
def as_sequence(iterable):
"""Helper function to convert iterable arguments into sequences."""
if isinstance(iterable, (list, tuple)): return iterable
else: return list(iterable)
def _generalised_sum(data, func):
"""_generalised_sum(data, func) -> len(data), sum(func(items of data))
Return a two-tuple of the length of data and the sum of func() of the
items of data. If func is None, use just the sum of items of data.
"""
# Try fast path.
try:
count = len(data)
except TypeError:
# Slow path for iterables without len.
# We want to support BIG data streams, so avoid converting to a
# list. Since we need both a count and a sum, we iterate over the
# items and emulate math.fsum ourselves.
ap = add_partial
partials = []
count = 0
if func is None:
# Note: we could check for func is None inside the loop. That
# is much slower. We could also say func = lambda x: x, which
# isn't as bad but still somewhat expensive.
for count, x in enumerate(data, 1):
ap(x, partials)
else:
for count, x in enumerate(data, 1):
ap(func(x), partials)
total = math.fsum(partials)
else: # Fast path continues.
if func is None:
# See comment above.
total = math.fsum(data)
else:
total = math.fsum(func(x) for x in data)
return count, total
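# Editor's note: add_partial itself is imported from the stats package at the
# bottom of this module; a well-known implementation of the same idea (keep a
# list of exact partial sums, as in the classic fsum recipe) looks like this:
def _example_add_partial(x, partials):
    """Add float x to the list of exact partial sums, in place."""
    i = 0
    for y in partials:
        if abs(x) < abs(y):
            x, y = y, x
        hi = x + y
        lo = y - (hi - x)
        if lo:
            partials[i] = lo
            i += 1
        x = hi
    partials[i:] = [x]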
# FIXME this may not be accurate enough for 2nd moments (x-m)**2
# A more accurate algorithm may be the compensated version:
# sum2 = sum(x-m)**2) as above
# sumc = sum(x-m) # Should be zero, but may not be.
# total = sum2 - sumc**2/n
def _sum_sq_deviations(data, m):
"""Returns the sum of square deviations (SS).
Helper function for calculating variance.
"""
if m is None:
# Two pass algorithm.
data = as_sequence(data)
n, total = _generalised_sum(data, None)
if n == 0:
return (0, total)
m = total/n
return _generalised_sum(data, lambda x: (x-m)**2)
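# Editor's note: an illustrative sketch of the compensated variant described
# in the FIXME above (assuming the mean m is already known): the plain sum of
# squared deviations is corrected by the (ideally zero) sum of deviations.
def _example_sum_sq_deviations_compensated(data, m):
    data = as_sequence(data)
    n, sum2 = _generalised_sum(data, lambda x: (x - m) ** 2)
    if n == 0:
        return (0, sum2)
    _, sumc = _generalised_sum(data, lambda x: x - m)
    return (n, sum2 - sumc ** 2 / n)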
def _sum_prod_deviations(xydata, mx, my):
"""Returns the sum of the product of deviations (SP).
Helper function for calculating covariance.
"""
if mx is None:
# Two pass algorithm.
xydata = as_sequence(xydata)
nx, sumx = _generalised_sum((t[0] for t in xydata), None)
if nx == 0:
raise StatsError('no data items')
mx = sumx/nx
if my is None:
# Two pass algorithm.
xydata = as_sequence(xydata)
ny, sumy = _generalised_sum((t[1] for t in xydata), None)
if ny == 0:
raise StatsError('no data items')
my = sumy/ny
return _generalised_sum(xydata, lambda t: (t[0]-mx)*(t[1]-my))
def _validate_int(n):
# This will raise TypeError, OverflowError (for infinities) or
# ValueError (for NANs or non-integer numbers).
if n != int(n):
raise ValueError('requires integer value')
# === Generic utilities ===
from stats import minmax, add_partial
|
tnotstar/pycalcstats
|
src/stats/utils.py
|
Python
|
mit
| 3,781
|
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="tickformatstops", parent_name="contour.colorbar", **kwargs
):
super(TickformatstopsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
""",
),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/contour/colorbar/_tickformatstops.py
|
Python
|
mit
| 2,290
|
from requests import Request
from oauthlib.common import unquote
from requests_oauthlib import OAuth1
from requests_oauthlib.oauth1_auth import SIGNATURE_TYPE_BODY
from tool_base import ToolBase
from launch_params import LAUNCH_PARAMS_REQUIRED
from utils import parse_qs, InvalidLTIConfigError, generate_identifier
class ToolConsumer(ToolBase):
def __init__(self, consumer_key, consumer_secret,
params=None, launch_url=None):
'''
Create new ToolConsumer.
'''
# allow launch_url to be specified in launch_params for
# backwards compatibility
if launch_url is None:
if 'launch_url' not in params:
raise InvalidLTIConfigError('missing \'launch_url\' arg!')
else:
launch_url = params['launch_url']
del params['launch_url']
self.launch_url = launch_url
super(ToolConsumer, self).__init__(consumer_key, consumer_secret,
params=params)
def has_required_params(self):
return all([
self.launch_params.get(x) for x in LAUNCH_PARAMS_REQUIRED
])
def generate_launch_request(self, **kwargs):
"""
returns a Oauth v1 "signed" requests.PreparedRequest instance
"""
if not self.has_required_params():
raise InvalidLTIConfigError(
'Consumer\'s launch params missing one of ' \
+ str(LAUNCH_PARAMS_REQUIRED)
)
# if 'oauth_consumer_key' not in self.launch_params:
# self.launch_params['oauth_consumer_key'] = self.consumer_key
params = self.to_params()
r = Request('POST', self.launch_url, data=params).prepare()
sign = OAuth1(self.consumer_key, self.consumer_secret,
signature_type=SIGNATURE_TYPE_BODY, **kwargs)
return sign(r)
def generate_launch_data(self, **kwargs):
"""
Provided for backwards compatibility
"""
r = self.generate_launch_request(**kwargs)
return parse_qs(unquote(r.body))
def set_config(self, config):
'''
Set launch data from a ToolConfig.
'''
        if self.launch_url is None:
self.launch_url = config.launch_url
self.launch_params.update(config.custom_params)
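# Editor's note: a usage sketch with made-up key, secret and URL; the params
# dict must also satisfy LAUNCH_PARAMS_REQUIRED, and the field names below
# are typical LTI 1.x launch parameters, shown here as an assumption.
def _example_launch():
    consumer = ToolConsumer(
        'example_key', 'example_secret',
        params={
            'launch_url': 'https://tool.example.com/launch',
            'lti_message_type': 'basic-lti-launch-request',
            'lti_version': 'LTI-1p0',
            'resource_link_id': 'example-link-id',
        })
    return consumer.generate_launch_data()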
|
brainheart/dce_lti_py
|
dce_lti_py/tool_consumer.py
|
Python
|
mit
| 2,381
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import re
import json
import tempfile
import toml
import os
class RequirementsTXTUpdater(object):
SUB_REGEX = r"^{}(?=\s*\r?\n?$)"
@classmethod
def update(cls, content, dependency, version, spec="==", hashes=()):
"""
        Updates the requirement to the latest version for the given content
        and adds hashes if necessary.
:param content: str, content
:return: str, updated content
"""
new_line = "{name}{spec}{version}".format(name=dependency.full_name, spec=spec, version=version)
appendix = ''
# leave environment markers intact
if ";" in dependency.line:
# condense multiline, split out the env marker, strip comments and --hashes
new_line += ";" + dependency.line.splitlines()[0].split(";", 1)[1] \
.split("#")[0].split("--hash")[0].rstrip()
# add the comment
if "#" in dependency.line:
# split the line into parts: requirement and comment
parts = dependency.line.split("#")
requirement, comment = parts[0], "#".join(parts[1:])
# find all whitespaces between the requirement and the comment
whitespaces = (hex(ord('\t')), hex(ord(' ')))
trailing_whitespace = ''
for c in requirement[::-1]:
if hex(ord(c)) in whitespaces:
trailing_whitespace += c
else:
break
appendix += trailing_whitespace + "#" + comment
# if this is a hashed requirement, add a multiline break before the comment
if dependency.hashes and not new_line.endswith("\\"):
new_line += " \\"
# if this is a hashed requirement, add the hashes
if hashes:
for n, new_hash in enumerate(hashes):
new_line += "\n --hash={method}:{hash}".format(
method=new_hash['method'],
hash=new_hash['hash']
)
# append a new multiline break if this is not the last line
if len(hashes) > n + 1:
new_line += " \\"
new_line += appendix
regex = cls.SUB_REGEX.format(re.escape(dependency.line))
return re.sub(regex, new_line, content, flags=re.MULTILINE)
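# Editor's note: a usage sketch with a stand-in dependency object; the real
# Dependency class lives elsewhere in dparse, but the updater above only
# relies on the full_name, line and hashes attributes.
def _example_requirements_update():
    from collections import namedtuple
    FakeDependency = namedtuple('FakeDependency', ['full_name', 'line', 'hashes'])
    dep = FakeDependency(full_name='requests', line='requests==2.18.0', hashes=())
    content = "requests==2.18.0\n"
    return RequirementsTXTUpdater.update(content, dep, '2.19.1')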
class CondaYMLUpdater(RequirementsTXTUpdater):
SUB_REGEX = r"{}(?=\s*\r?\n?$)"
class ToxINIUpdater(CondaYMLUpdater):
pass
class SetupCFGUpdater(CondaYMLUpdater):
pass
class PipfileUpdater(object):
@classmethod
def update(cls, content, dependency, version, spec="==", hashes=()):
data = toml.loads(content)
if data:
for package_type in ['packages', 'dev-packages']:
if package_type in data:
if dependency.full_name in data[package_type]:
data[package_type][dependency.full_name] = "{spec}{version}".format(
spec=spec, version=version
)
try:
from pipenv.project import Project
except ImportError:
raise ImportError("Updating a Pipfile requires the pipenv extra to be installed. Install it with "
"pip install dparse[pipenv]")
pipfile = tempfile.NamedTemporaryFile(delete=False)
p = Project(chdir=False)
p.write_toml(data=data, path=pipfile.name)
data = open(pipfile.name).read()
os.remove(pipfile.name)
return data
class PipfileLockUpdater(object):
@classmethod
def update(cls, content, dependency, version, spec="==", hashes=()):
data = json.loads(content)
if data:
for package_type in ['default', 'develop']:
if package_type in data:
if dependency.full_name in data[package_type]:
data[package_type][dependency.full_name] = {
'hashes': [
"{method}:{hash}".format(
hash=h['hash'],
method=h['method']
) for h in hashes
],
'version': "{spec}{version}".format(
spec=spec, version=version
)
}
return json.dumps(data, indent=4, separators=(',', ': ')) + "\n"
|
kennethreitz/pipenv
|
pipenv/vendor/dparse/updater.py
|
Python
|
mit
| 4,566
|
"""Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks, all aspects
of the widgets appearance lies at Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import tkinter
_flatten = tkinter._flatten
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = tkinter.TkVersion < 8.5
def _load_tile(master):
if _REQUIRE_TILE:
import os
tilelib = os.environ.get('TILE_LIBRARY')
if tilelib:
# append custom tile path to the list of directories that
# Tcl uses when attempting to resolve packages with the package
# command
master.tk.eval(
'global auto_path; '
'lappend auto_path {%s}' % tilelib)
master.tk.eval('package require tile') # TclError may be raised here
master._tile_loaded = True
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
format = "%s" if not script else "{%s}"
opts = []
for opt, value in optdict.items():
if ignore and opt in ignore:
continue
if isinstance(value, (list, tuple)):
v = []
for val in value:
if isinstance(val, str):
v.append(str(val) if val else '{}')
else:
v.append(str(val))
# format v according to the script option, but also check for
# space in any value in v in order to group them correctly
value = format % ' '.join(
('{%s}' if ' ' in val else '%s') % val for val in v)
if script and value == '':
value = '{}' # empty string in Python is equivalent to {} in Tcl
opts.append(("-%s" % opt, value))
# Remember: _flatten skips over None
return _flatten(opts)
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
E.g. (script=False):
{'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
format = "%s" if not script else "{%s}"
opts = []
for opt, value in mapdict.items():
opt_val = []
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
for statespec in value:
state, val = statespec[:-1], statespec[-1]
if len(state) > 1: # group multiple states
state = "{%s}" % ' '.join(state)
else: # single state
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or '{}'
if isinstance(val, (list, tuple)): # val needs to be grouped
val = "{%s}" % ' '.join(map(str, val))
opt_val.append("%s %s" % (state, val))
opts.append(("-%s" % opt, format % ' '.join(opt_val)))
return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
"""Formats args and kw according to the given element factory etype."""
spec = None
opts = ()
if etype in ("image", "vsapi"):
if etype == "image": # define an element based on an image
# first arg should be the default image name
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
imagespec = _format_mapdict({None: args[1:]})[1]
spec = "%s %s" % (iname, imagespec)
else:
# define an element whose visual appearance is drawn using the
# Microsoft Visual Styles API which is responsible for the
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
statemap = _format_mapdict({None: args[2:]})[1]
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
elif etype == "from": # clone an element
# it expects a themename and optionally an element to clone from,
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
opts = (args[1], )
if script:
spec = '{%s}' % spec
opts = ' '.join(map(str, opts))
return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
"""Formats a layout list so we can pass the result to ttk::style
layout and ttk::style settings. Note that the layout doesn't
necessarily have to be a list.
E.g.:
[("Menubutton.background", None),
("Menubutton.button", {"children":
[("Menubutton.focus", {"children":
[("Menubutton.padding", {"children":
[("Menubutton.label", {"side": "left", "expand": 1})]
})]
})]
}),
("Menubutton.indicator", {"side": "right"})
]
returns:
Menubutton.background
Menubutton.button -children {
Menubutton.focus -children {
Menubutton.padding -children {
Menubutton.label -side left -expand 1
}
}
}
Menubutton.indicator -side right"""
script = []
for layout_elem in layout:
elem, opts = layout_elem
opts = opts or {}
fopts = ' '.join(map(str, _format_optdict(opts, True, "children")))
head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
if "children" in opts:
script.append(head + " -children {")
indent += indent_size
newscript, indent = _format_layoutlist(opts['children'], indent,
indent_size)
script.append(newscript)
indent -= indent_size
script.append('%s}' % (' ' * indent))
else:
script.append(head)
return '\n'.join(script), indent
def _script_from_settings(settings):
"""Returns an appropriate script, based on settings, according to
theme_settings definition to be used by theme_settings and
theme_create."""
script = []
# a script will be generated according to settings passed, which
# will then be evaluated by Tcl
for name, opts in settings.items():
# will format specific keys according to Tcl code
if opts.get('configure'): # format 'configure'
s = ' '.join(map(str, _format_optdict(opts['configure'], True)))
script.append("ttk::style configure %s %s;" % (name, s))
if opts.get('map'): # format 'map'
s = ' '.join(map(str, _format_mapdict(opts['map'], True)))
script.append("ttk::style map %s %s;" % (name, s))
if 'layout' in opts: # format 'layout' which may be empty
if not opts['layout']:
s = 'null' # could be any other word, but this one makes sense
else:
s, _ = _format_layoutlist(opts['layout'])
script.append("ttk::style layout %s {\n%s\n}" % (name, s))
if opts.get('element create'): # format 'element create'
eopts = opts['element create']
etype = eopts[0]
# find where args end, and where kwargs start
argc = 1 # etype was the first one
while argc < len(eopts) and not hasattr(eopts[argc], 'items'):
argc += 1
elemargs = eopts[1:argc]
elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)
script.append("ttk::style element create %s %s %s %s" % (
name, etype, spec, opts))
return '\n'.join(script)
def _dict_from_tcltuple(ttuple, cut_minus=True):
"""Break tuple in pairs, format it properly, then build the return
dict. If cut_minus is True, the supposed '-' prefixing options will
be removed.
ttuple is expected to contain an even number of elements."""
opt_start = 1 if cut_minus else 0
retdict = {}
it = iter(ttuple)
for opt, val in zip(it, it):
retdict[str(opt)[opt_start:]] = val
return tclobjs_to_py(retdict)
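# Illustrative example:
#   _dict_from_tcltuple(('-foreground', 'blue', '-padding', '1 2 3 4'))
#   returns {'foreground': 'blue', 'padding': '1 2 3 4'}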
def _list_from_statespec(stuple):
"""Construct a list from the given statespec tuple according to the
statespec format accepted by _format_mapdict."""
nval = []
for val in stuple:
typename = getattr(val, 'typename', None)
if typename is None:
nval.append(val)
else: # this is a Tcl object
val = str(val)
if typename == 'StateSpec':
val = val.split()
nval.append(val)
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
def _list_from_layouttuple(ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(val)
opts[opt] = val
return res
def _val_or_dict(options, func, *args):
"""Format options then call func with args and options and return
the appropriate result.
If no option is specified, a dict is returned. If an option is
specified with the None value, the value for that option is returned.
Otherwise, the function just sets the passed options and the caller
shouldn't be expecting a return value anyway."""
options = _format_optdict(options)
res = func(*(args + options))
if len(options) % 2: # option specified without a value, return its value
return res
return _dict_from_tcltuple(res)
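# Illustrative calls (with tk.call as func; the style name is assumed):
#   _val_or_dict({}, tk.call, 'ttk::style', 'configure', 'TButton')
#       -> dict of every option/value pair for the style
#   _val_or_dict({'padding': None}, tk.call, 'ttk::style', 'configure',
#                'TButton')  -> just the -padding value
#   _val_or_dict({'padding': 5}, tk.call, 'ttk::style', 'configure',
#                'TButton')  -> sets the option; no useful return value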
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = str(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def tclobjs_to_py(adict):
"""Returns adict with its values converted from Tcl objects to Python
objects."""
for opt, val in adict.items():
if val and hasattr(val, '__len__') and not isinstance(val, str):
if getattr(val[0], 'typename', None) == 'StateSpec':
val = _list_from_statespec(val)
else:
val = list(map(_convert_stringval, val))
elif hasattr(val, 'typename'): # some other (single) Tcl object
val = _convert_stringval(val)
adict[opt] = val
return adict
def setup_master(master=None):
"""If master is not None, itself is returned. If master is None,
the default master is returned if there is one, otherwise a new
master is created and returned.
If it is not allowed to use the default root and master is None,
RuntimeError is raised."""
if master is None:
if tkinter._support_default_root:
master = tkinter._default_root or tkinter.Tk()
else:
raise RuntimeError(
"No master specified and tkinter is "
"configured to not support default root")
return master
class Style(object):
"""Manipulate style database."""
_name = "ttk::style"
def __init__(self, master=None):
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
self.master = master
self.tk = self.master.tk
def configure(self, style, query_opt=None, **kw):
"""Query or sets the default value of the specified option(s) in
style.
Each key in kw is an option and each value is either a string or
a sequence identifying the value for that option."""
if query_opt is not None:
kw[query_opt] = None
return _val_or_dict(kw, self.tk.call, self._name, "configure", style)
def map(self, style, query_opt=None, **kw):
"""Query or sets dynamic values of the specified option(s) in
style.
Each key in kw is an option and each value should be a list or a
tuple (usually) containing statespecs grouped in tuples, or list,
or something else of your preference. A statespec is compound of
one or more states and then a value."""
if query_opt is not None:
return _list_from_statespec(
self.tk.call(self._name, "map", style, '-%s' % query_opt))
return _dict_from_tcltuple(
self.tk.call(self._name, "map", style, *(_format_mapdict(kw))))
def lookup(self, style, option, state=None, default=None):
"""Returns the value specified for option in style.
If state is specified it is expected to be a sequence of one
or more states. If the default argument is set, it is used as
a fallback value in case no specification for option is found."""
state = ' '.join(state) if state else ''
return self.tk.call(self._name, "lookup", style, '-%s' % option,
state, default)
def layout(self, style, layoutspec=None):
"""Define the widget layout for given style. If layoutspec is
omitted, return the layout specification for given style.
layoutspec is expected to be a list or an object other than
None that evaluates to False if you want to "turn off" that style.
If it is a list (or tuple, or something else), each item should be
a tuple where the first item is the layout name and the second item
should have the format described below:
LAYOUTS
A layout can contain the value None, if it takes no options, or
a dict of options specifying how to arrange the element.
The layout mechanism uses a simplified version of the pack
geometry manager: given an initial cavity, each element is
allocated a parcel. Valid options/values are:
side: whichside
Specifies which side of the cavity to place the
element; one of top, right, bottom or left. If
omitted, the element occupies the entire cavity.
sticky: nswe
Specifies where the element is placed inside its
allocated parcel.
children: [sublayout... ]
Specifies a list of elements to place inside the
element. Each element is a tuple (or other sequence)
where the first item is the layout name, and the other
is a LAYOUT."""
lspec = None
if layoutspec:
lspec = _format_layoutlist(layoutspec)[0]
elif layoutspec is not None: # will disable the layout ({}, '', etc)
lspec = "null" # could be any other word, but this may make sense
# when calling layout(style) later
return _list_from_layouttuple(
self.tk.call(self._name, "layout", style, lspec))
def element_create(self, elementname, etype, *args, **kw):
"""Create a new element in the current theme of given etype."""
spec, opts = _format_elemcreate(etype, False, *args, **kw)
self.tk.call(self._name, "element", "create", elementname, etype,
spec, *opts)
def element_names(self):
"""Returns the list of elements defined in the current theme."""
return self.tk.call(self._name, "element", "names")
def element_options(self, elementname):
"""Return the list of elementname's options."""
return self.tk.call(self._name, "element", "options", elementname)
def theme_create(self, themename, parent=None, settings=None):
"""Creates a new theme.
It is an error if themename already exists. If parent is
specified, the new theme will inherit styles, elements and
layouts from the specified parent theme. If settings are present,
they are expected to have the same syntax used for theme_settings."""
script = _script_from_settings(settings) if settings else ''
if parent:
self.tk.call(self._name, "theme", "create", themename,
"-parent", parent, "-settings", script)
else:
self.tk.call(self._name, "theme", "create", themename,
"-settings", script)
def theme_settings(self, themename, settings):
"""Temporarily sets the current theme to themename, apply specified
settings and then restore the previous theme.
Each key in settings is a style and each value may contain the
keys 'configure', 'map', 'layout' and 'element create' and they
are expected to have the same format as specified by the methods
configure, map, layout and element_create respectively."""
script = _script_from_settings(settings)
self.tk.call(self._name, "theme", "settings", themename, script)
def theme_names(self):
"""Returns a list of all known themes."""
return self.tk.call(self._name, "theme", "names")
def theme_use(self, themename=None):
"""If themename is None, returns the theme in use, otherwise, set
the current theme to themename, refreshes all widgets and emits
a <<ThemeChanged>> event."""
if themename is None:
# Starting on Tk 8.6, checking this global is no longer needed
# since it allows doing self.tk.call(self._name, "theme", "use")
return self.tk.eval("return $ttk::currentTheme")
# using "ttk::setTheme" instead of "ttk::style theme use" causes
# the variable currentTheme to be updated, also, ttk::setTheme calls
# "ttk::style theme use" in order to change theme.
self.tk.call("ttk::setTheme", themename)
class Widget(tkinter.Widget):
"""Base class for Tk themed widgets."""
def __init__(self, master, widgetname, kw=None):
"""Constructs a Ttk Widget with the parent master.
STANDARD OPTIONS
class, cursor, takefocus, style
SCROLLABLE WIDGET OPTIONS
xscrollcommand, yscrollcommand
LABEL WIDGET OPTIONS
text, textvariable, underline, image, compound, width
WIDGET STATES
active, disabled, focus, pressed, selected, background,
readonly, alternate, invalid
"""
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
tkinter.Widget.__init__(self, master, widgetname, kw=kw)
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the empty
string if the point does not lie within any element.
x and y are pixel coordinates relative to the widget."""
return self.tk.call(self._w, "identify", x, y)
def instate(self, statespec, callback=None, *args, **kw):
"""Test the widget's state.
If callback is not specified, returns True if the widget state
matches statespec and False otherwise. If callback is specified,
then it will be invoked with *args, **kw if the widget state
matches statespec. statespec is expected to be a sequence."""
ret = self.tk.call(self._w, "instate", ' '.join(statespec))
if ret and callback:
return callback(*args, **kw)
return bool(ret)
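# Usage sketch: for a Button b,
#   b.instate(['disabled'])             # True when the widget is disabled
#   b.instate(['!disabled'], callback)  # run callback only when enabled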
def state(self, statespec=None):
"""Modify or inquire widget state.
Widget state is returned if statespec is None, otherwise it is
set according to the statespec flags and then a new state spec
is returned indicating which flags were changed. statespec is
expected to be a sequence."""
if statespec is not None:
statespec = ' '.join(statespec)
return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
class Button(Widget):
"""Ttk Button widget, displays a textual label and/or image, and
evaluates a command when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Button widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, default, width
"""
Widget.__init__(self, master, "ttk::button", kw)
def invoke(self):
"""Invokes the command associated with the button."""
return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
"""Ttk Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Checkbutton widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, offvalue, onvalue, variable
"""
Widget.__init__(self, master, "ttk::checkbutton", kw)
def invoke(self):
"""Toggles between the selected and deselected states and
invokes the associated command. If the widget is currently
selected, sets the option variable to the offvalue option
and deselects the widget; otherwise, sets the option variable
to the option onvalue.
Returns the result of the associated command."""
return self.tk.call(self._w, "invoke")
class Entry(Widget, tkinter.Entry):
"""Ttk Entry widget displays a one-line text string and allows that
string to be edited by the user."""
def __init__(self, master=None, widget=None, **kw):
"""Constructs a Ttk Entry widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand
WIDGET-SPECIFIC OPTIONS
exportselection, invalidcommand, justify, show, state,
textvariable, validate, validatecommand, width
VALIDATION MODES
none, key, focus, focusin, focusout, all
"""
Widget.__init__(self, master, widget or "ttk::entry", kw)
def bbox(self, index):
"""Return a tuple of (x, y, width, height) which describes the
bounding box of the character given by index."""
return self.tk.call(self._w, "bbox", index)
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the
empty string if the coordinates are outside the window."""
return self.tk.call(self._w, "identify", x, y)
def validate(self):
"""Force revalidation, independent of the conditions specified
by the validate option. Returns False if validation fails, True
if it succeeds. Sets or clears the invalid state accordingly."""
return bool(self.tk.call(self._w, "validate"))
class Combobox(Entry):
"""Ttk Combobox widget combines a text field with a pop-down list of
values."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Combobox widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
exportselection, justify, height, postcommand, state,
textvariable, values, width
"""
# The "values" option may need special formatting, so leave to
# _format_optdict the responsibility to format it
if "values" in kw:
kw["values"] = _format_optdict({'v': kw["values"]})[1]
Entry.__init__(self, master, "ttk::combobox", **kw)
def __setitem__(self, item, value):
if item == "values":
value = _format_optdict({item: value})[1]
Entry.__setitem__(self, item, value)
def configure(self, cnf=None, **kw):
"""Custom Combobox configure, created to properly format the values
option."""
if "values" in kw:
kw["values"] = _format_optdict({'v': kw["values"]})[1]
return Entry.configure(self, cnf, **kw)
def current(self, newindex=None):
"""If newindex is supplied, sets the combobox value to the
element at position newindex in the list of values. Otherwise,
returns the index of the current value in the list of values
or -1 if the current value does not appear in the list."""
return self.tk.call(self._w, "current", newindex)
def set(self, value):
"""Sets the value of the combobox to value."""
self.tk.call(self._w, "set", value)
class Frame(Widget):
"""Ttk Frame widget is a container, used to group other widgets
together."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Frame with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
borderwidth, relief, padding, width, height
"""
Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
"""Ttk Label widget displays a textual label and/or image."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Label with parent master.
STANDARD OPTIONS
class, compound, cursor, image, style, takefocus, text,
textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
anchor, background, font, foreground, justify, padding,
relief, text, wraplength
"""
Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
"""Ttk Labelframe widget is a container used to group other widgets
together. It has an optional label, which may be a plain text string
or another widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Labelframe with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
labelanchor, text, underline, padding, labelwidget, width,
height
"""
Widget.__init__(self, master, "ttk::labelframe", kw)
LabelFrame = Labelframe # tkinter name compatibility
class Menubutton(Widget):
"""Ttk Menubutton widget displays a textual label and/or image, and
displays a menu when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Menubutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
direction, menu
"""
Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
"""Ttk Notebook widget manages a collection of windows and displays
a single one at a time. Each child window is associated with a tab,
which the user may select to change the currently-displayed window."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Notebook with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
height, padding, width
TAB OPTIONS
state, sticky, padding, text, image, compound, underline
TAB IDENTIFIERS (tab_id)
The tab_id argument found in several methods may take any of
the following forms:
* An integer between zero and the number of tabs
* The name of a child window
* A positional specification of the form "@x,y", which
defines the tab
* The string "current", which identifies the
currently-selected tab
* The string "end", which returns the number of tabs (only
valid for method index)
"""
Widget.__init__(self, master, "ttk::notebook", kw)
def add(self, child, **kw):
"""Adds a new tab to the notebook.
If child is currently managed by the notebook but hidden, it is
restored to its previous position."""
self.tk.call(self._w, "add", child, *(_format_optdict(kw)))
def forget(self, tab_id):
"""Removes the tab specified by tab_id, unmaps and unmanages the
associated window."""
self.tk.call(self._w, "forget", tab_id)
def hide(self, tab_id):
"""Hides the tab specified by tab_id.
The tab will not be displayed, but the associated window remains
managed by the notebook and its configuration remembered. Hidden
tabs may be restored with the add command."""
self.tk.call(self._w, "hide", tab_id)
def identify(self, x, y):
"""Returns the name of the tab element at position x, y, or the
empty string if none."""
return self.tk.call(self._w, "identify", x, y)
def index(self, tab_id):
"""Returns the numeric index of the tab specified by tab_id, or
the total number of tabs if tab_id is the string "end"."""
return self.tk.call(self._w, "index", tab_id)
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified position.
pos is either the string end, an integer index, or the name of
a managed child. If child is already managed by the notebook,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def select(self, tab_id=None):
"""Selects the specified tab.
The associated child window will be displayed, and the
previously-selected window (if different) is unmapped. If tab_id
is omitted, returns the widget name of the currently selected
pane."""
return self.tk.call(self._w, "select", tab_id)
def tab(self, tab_id, option=None, **kw):
"""Query or modify the options of the specific tab_id.
If kw is not given, returns a dict of the tab option values. If option
is specified, returns the value of that option. Otherwise, sets the
options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "tab", tab_id)
def tabs(self):
"""Returns a list of windows managed by the notebook."""
return self.tk.call(self._w, "tabs") or ()
def enable_traversal(self):
"""Enable keyboard traversal for a toplevel window containing
this notebook.
This will extend the bindings for the toplevel window containing
this notebook as follows:
Control-Tab: selects the tab following the currently selected
one
Shift-Control-Tab: selects the tab preceding the currently
selected one
Alt-K: where K is the mnemonic (underlined) character of any
tab, will select that tab.
Multiple notebooks in a single toplevel may be enabled for
traversal, including nested notebooks. However, notebook traversal
only works properly if all panes are direct children of the
notebook."""
# The only, and good, difference I see is about mnemonics, which work
# after calling this method. Control-Tab and Shift-Control-Tab always
# work (here at least).
self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, tkinter.PanedWindow):
"""Ttk Panedwindow widget displays a number of subwindows, stacked
either vertically or horizontally."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Panedwindow with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, width, height
PANE OPTIONS
weight
"""
Widget.__init__(self, master, "ttk::panedwindow", kw)
forget = tkinter.PanedWindow.forget # overrides Pack.forget
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified positions.
pos is either the string end, an integer index, or the name
of a child. If child is already managed by the paned window,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def pane(self, pane, option=None, **kw):
"""Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "pane", pane)
def sashpos(self, index, newpos=None):
"""If newpos is specified, sets the position of sash number index.
May adjust the positions of adjacent sashes to ensure that
positions are monotonically increasing. Sash positions are further
constrained to be between 0 and the total size of the widget.
Returns the new position of sash number index."""
return self.tk.call(self._w, "sashpos", index, newpos)
PanedWindow = Panedwindow # tkinter name compatibility
class Progressbar(Widget):
"""Ttk Progressbar widget shows the status of a long-running
operation. They can operate in two modes: determinate mode shows the
amount completed relative to the total amount of work to be done, and
indeterminate mode provides an animated display to let the user know
that something is happening."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Progressbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, length, mode, maximum, value, variable, phase
"""
Widget.__init__(self, master, "ttk::progressbar", kw)
def start(self, interval=None):
"""Begin autoincrement mode: schedules a recurring timer event
that calls method step every interval milliseconds.
interval defaults to 50 milliseconds (20 steps/second) if omitted."""
self.tk.call(self._w, "start", interval)
def step(self, amount=None):
"""Increments the value option by amount.
amount defaults to 1.0 if omitted."""
self.tk.call(self._w, "step", amount)
def stop(self):
"""Stop autoincrement mode: cancels any recurring timer event
initiated by start."""
self.tk.call(self._w, "stop")
class Radiobutton(Widget):
"""Ttk Radiobutton widgets are used in groups to show or change a
set of mutually-exclusive options."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Radiobutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, value, variable
"""
Widget.__init__(self, master, "ttk::radiobutton", kw)
def invoke(self):
"""Sets the option variable to the option value, selects the
widget, and invokes the associated command.
Returns the result of the command, or an empty string if
no command is specified."""
return self.tk.call(self._w, "invoke")
class Scale(Widget, tkinter.Scale):
"""Ttk Scale widget is typically used to control the numeric value of
a linked variable that varies uniformly over some range."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scale with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, from, length, orient, to, value, variable
"""
Widget.__init__(self, master, "ttk::scale", kw)
def configure(self, cnf=None, **kw):
"""Modify or query scale options.
Setting a value for any of the "from", "from_" or "to" options
generates a <<RangeChanged>> event."""
if cnf:
kw.update(cnf)
Widget.configure(self, **kw)
if any(['from' in kw, 'from_' in kw, 'to' in kw]):
self.event_generate('<<RangeChanged>>')
def get(self, x=None, y=None):
"""Get the current value of the value option, or the value
corresponding to the coordinates x, y if they are specified.
x and y are pixel coordinates relative to the scale widget
origin."""
return self.tk.call(self._w, 'get', x, y)
class Scrollbar(Widget, tkinter.Scrollbar):
"""Ttk Scrollbar controls the viewport of a scrollable widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scrollbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, orient
"""
Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
"""Ttk Separator widget displays a horizontal or vertical separator
bar."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Separator with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient
"""
Widget.__init__(self, master, "ttk::separator", kw)
class Sizegrip(Widget):
"""Ttk Sizegrip allows the user to resize the containing toplevel
window by pressing and dragging the grip."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Sizegrip with parent master.
STANDARD OPTIONS
class, cursor, state, style, takefocus
"""
Widget.__init__(self, master, "ttk::sizegrip", kw)
class Treeview(Widget, tkinter.XView, tkinter.YView):
"""Ttk Treeview widget displays a hierarchical collection of items.
Each item has a textual label, an optional image, and an optional list
of data values. The data values are displayed in successive columns
after the tree label."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Treeview with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand,
yscrollcommand
WIDGET-SPECIFIC OPTIONS
columns, displaycolumns, height, padding, selectmode, show
ITEM OPTIONS
text, image, values, open, tags
TAG OPTIONS
foreground, background, font, image
"""
Widget.__init__(self, master, "ttk::treeview", kw)
def bbox(self, item, column=None):
"""Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
return self.tk.call(self._w, "bbox", item, column)
def get_children(self, item=None):
"""Returns a tuple of children belonging to item.
If item is not specified, returns root children."""
return self.tk.call(self._w, "children", item or '') or ()
def set_children(self, item, *newchildren):
"""Replaces item's child with newchildren.
Children present in item that are not present in newchildren
are detached from tree. No items in newchildren may be an
ancestor of item."""
self.tk.call(self._w, "children", item, newchildren)
def column(self, column, option=None, **kw):
"""Query or modify the options for the specified column.
If kw is not given, returns a dict of the column option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "column", column)
def delete(self, *items):
"""Delete all specified items and all their descendants. The root
item may not be deleted."""
self.tk.call(self._w, "delete", items)
def detach(self, *items):
"""Unlinks all of the specified items from the tree.
The items and all of their descendants are still present, and may
be reinserted at another point in the tree, but will not be
displayed. The root item may not be detached."""
self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns True if the specified item is present in the tree,
False otherwise."""
return bool(self.tk.call(self._w, "exists", item))
def focus(self, item=None):
"""If item is specified, sets the focus item to item. Otherwise,
returns the current focus item, or '' if there is none."""
return self.tk.call(self._w, "focus", item)
def heading(self, column, option=None, **kw):
"""Query or modify the heading options for the specified column.
If kw is not given, returns a dict of the heading option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values.
Valid options/values are:
text: text
The text to display in the column heading
image: image_name
Specifies an image to display to the right of the column
heading
anchor: anchor
Specifies how the heading text should be aligned. One of
the standard Tk anchor values
command: callback
A callback to be invoked when the heading label is
pressed.
To configure the tree column heading, call this with column = "#0" """
cmd = kw.get('command')
if cmd and not isinstance(cmd, str):
# callback not registered yet, do it now
kw['command'] = self.master.register(cmd, self._substitute)
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, 'heading', column)
def identify(self, component, x, y):
"""Returns a description of the specified component under the
point given by x and y, or the empty string if no such component
is present at that position."""
return self.tk.call(self._w, "identify", component, x, y)
def identify_row(self, y):
"""Returns the item ID of the item at position y."""
return self.identify("row", 0, y)
def identify_column(self, x):
"""Returns the data column identifier of the cell at position x.
The tree column has ID #0."""
return self.identify("column", x, 0)
def identify_region(self, x, y):
"""Returns one of:
heading: Tree heading area.
separator: Space between two column headings;
tree: The tree area.
cell: A data cell.
* Availability: Tk 8.6"""
return self.identify("region", x, y)
def identify_element(self, x, y):
"""Returns the element at position x, y.
* Availability: Tk 8.6"""
return self.identify("element", x, y)
def index(self, item):
"""Returns the integer index of item within its parent's list
of children."""
return self.tk.call(self._w, "index", item)
def insert(self, parent, index, iid=None, **kw):
"""Creates a new item and return the item identifier of the newly
created item.
parent is the item ID of the parent item, or the empty string
to create a new top-level item. index is an integer, or the value
end, specifying where in the list of parent's children to insert
the new item. If index is less than or equal to zero, the new node
is inserted at the beginning, if index is greater than or equal to
the current number of children, it is inserted at the end. If iid
is specified, it is used as the item identifier, iid must not
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
if iid:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
res = self.tk.call(self._w, "insert", parent, index, *opts)
return res
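# Usage sketch (column values are illustrative):
#   iid = tree.insert('', 'end', text='item-1', values=('a', 'b'))
#   tree.insert(iid, 0, text='first child')  # nested under item-1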
def item(self, item, option=None, **kw):
"""Query or modify the options for the specified item.
If no options are given, a dict with options/values for the item
is returned. If option is specified then the value for that option
is returned. Otherwise, sets the options to the corresponding
values as given by kw."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "item", item)
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children.
It is illegal to move an item under one of its descendants. If
index is less than or equal to zero, item is moved to the
beginning, if greater than or equal to the number of children,
it is moved to the end. If item was detached it is reattached."""
self.tk.call(self._w, "move", item, parent, index)
reattach = move # A sensible method name for reattaching detached items
def next(self, item):
"""Returns the identifier of item's next sibling, or '' if item
is the last child of its parent."""
return self.tk.call(self._w, "next", item)
def parent(self, item):
"""Returns the ID of the parent of item, or '' if item is at the
top level of the hierarchy."""
return self.tk.call(self._w, "parent", item)
def prev(self, item):
"""Returns the identifier of item's previous sibling, or '' if
item is the first child of its parent."""
return self.tk.call(self._w, "prev", item)
def see(self, item):
"""Ensure that item is visible.
Sets all of item's ancestors open option to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree."""
self.tk.call(self._w, "see", item)
def selection(self, selop=None, items=None):
"""If selop is not specified, returns selected items."""
return self.tk.call(self._w, "selection", selop, items)
def selection_set(self, items):
"""items becomes the new selection."""
self.selection("set", items)
def selection_add(self, items):
"""Add items to the selection."""
self.selection("add", items)
def selection_remove(self, items):
"""Remove items from the selection."""
self.selection("remove", items)
def selection_toggle(self, items):
"""Toggle the selection state of each item in items."""
self.selection("toggle", items)
def set(self, item, column=None, value=None):
"""With one argument, returns a dictionary of column/value pairs
for the specified item. With two arguments, returns the current
value of the specified column. With three arguments, sets the
value of given column in given item to the specified value."""
res = self.tk.call(self._w, "set", item, column, value)
if column is None and value is None:
return _dict_from_tcltuple(res, False)
else:
return res
def tag_bind(self, tagname, sequence=None, callback=None):
"""Bind a callback for the given event sequence to the tag tagname.
When an event is delivered to an item, the callbacks for each
of the item's tags option are called."""
self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0)
def tag_configure(self, tagname, option=None, **kw):
"""Query or modify the options for the specified tagname.
If kw is not given, returns a dict of the option settings for tagname.
If option is specified, returns the value for that option for the
specified tagname. Otherwise, sets the options to the corresponding
values for the given tagname."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "tag", "configure",
tagname)
def tag_has(self, tagname, item=None):
"""If item is specified, returns 1 or 0 depending on whether the
specified item has the given tagname. Otherwise, returns a list of
all items which have the specified tag.
* Availability: Tk 8.6"""
return self.tk.call(self._w, "tag", "has", tagname, item)
# Extensions
class LabeledScale(Frame):
"""A Ttk Scale widget with a Ttk Label widget indicating its
current value.
The Ttk Scale can be accessed through instance.scale, and Ttk Label
can be accessed through instance.label"""
def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
"""Construct an horizontal LabeledScale with parent master, a
variable to be associated with the Ttk Scale widget and its range.
If variable is not specified, a tkinter.IntVar is created.
WIDGET-SPECIFIC OPTIONS
compound: 'top' or 'bottom'
Specifies how to display the label relative to the scale.
Defaults to 'top'.
"""
self._label_top = kw.pop('compound', 'top') == 'top'
Frame.__init__(self, master, **kw)
self._variable = variable or tkinter.IntVar(master)
self._variable.set(from_)
self._last_valid = from_
self.label = Label(self)
self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
self.scale.bind('<<RangeChanged>>', self._adjust)
# position scale and label according to the compound option
scale_side = 'bottom' if self._label_top else 'top'
label_side = 'top' if scale_side == 'bottom' else 'bottom'
self.scale.pack(side=scale_side, fill='x')
tmp = Label(self).pack(side=label_side) # place holder
self.label.place(anchor='n' if label_side == 'top' else 's')
# update the label as scale or variable changes
self.__tracecb = self._variable.trace_variable('w', self._adjust)
self.bind('<Configure>', self._adjust)
self.bind('<Map>', self._adjust)
def destroy(self):
"""Destroy this widget and possibly its associated variable."""
try:
self._variable.trace_vdelete('w', self.__tracecb)
except AttributeError:
# widget has been destroyed already
pass
else:
del self._variable
Frame.destroy(self)
def _adjust(self, *args):
"""Adjust the label position according to the scale."""
def adjust_label():
self.update_idletasks() # "force" scale redraw
x, y = self.scale.coords()
if self._label_top:
y = self.scale.winfo_y() - self.label.winfo_reqheight()
else:
y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()
self.label.place_configure(x=x, y=y)
from_, to = self.scale['from'], self.scale['to']
if to < from_:
from_, to = to, from_
newval = self._variable.get()
if not from_ <= newval <= to:
# value outside range, set value back to the last valid one
self.value = self._last_valid
return
self._last_valid = newval
self.label['text'] = newval
self.after_idle(adjust_label)
def _get_value(self):
"""Return current scale value."""
return self._variable.get()
def _set_value(self, val):
"""Set new scale value."""
self._variable.set(val)
value = property(_get_value, _set_value)
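# Usage sketch (names are illustrative):
#   ls = LabeledScale(root, from_=0, to=100, compound='bottom')
#   ls.pack(fill='x')
#   ls.value = 42   # moves the scale; the label follows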
class OptionMenu(Menubutton):
"""Themed OptionMenu, based after tkinter's OptionMenu, which allows
the user to select a value from a menu."""
def __init__(self, master, variable, default=None, *values, **kwargs):
"""Construct a themed OptionMenu widget with master as the parent,
the resource textvariable set to variable, the initially selected
value specified by the default parameter, the menu values given by
*values and additional keywords.
WIDGET-SPECIFIC OPTIONS
style: stylename
Menubutton style.
direction: 'above', 'below', 'left', 'right', or 'flush'
Menubutton direction.
command: callback
A callback that will be invoked after selecting an item.
"""
kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
'direction': kwargs.pop('direction', None)}
Menubutton.__init__(self, master, **kw)
self['menu'] = tkinter.Menu(self, tearoff=False)
self._variable = variable
self._callback = kwargs.pop('command', None)
if kwargs:
raise tkinter.TclError('unknown option -%s' % (
next(iter(kwargs.keys()))))
self.set_menu(default, *values)
def __getitem__(self, item):
if item == 'menu':
return self.nametowidget(Menubutton.__getitem__(self, item))
return Menubutton.__getitem__(self, item)
def set_menu(self, default=None, *values):
"""Build a new menu of radiobuttons with *values and optionally
a default value."""
menu = self['menu']
menu.delete(0, 'end')
for val in values:
menu.add_radiobutton(label=val,
command=tkinter._setit(self._variable, val, self._callback))
if default:
self._variable.set(default)
def destroy(self):
"""Destroy this widget and its associated variable."""
del self._variable
Menubutton.destroy(self)
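# Usage sketch (names are illustrative):
#   var = tkinter.StringVar()
#   om = OptionMenu(root, var, 'one', 'one', 'two', 'three')
#   om.pack()
#   om.set_menu('two', 'two', 'three', 'four')  # rebuild menu, new default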
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.3.0/Lib/tkinter/ttk.py
|
Python
|
mit
| 56,245
|
# -*- coding: utf-8 -*-
"""Doctest for method/function calls.
We're going to use these types for extra testing
>>> from UserList import UserList
>>> from UserDict import UserDict
We're defining four helper functions
>>> def e(a,b):
... print a, b
>>> def f(*a, **k):
... print a, test_support.sortdict(k)
>>> def g(x, *y, **z):
... print x, y, test_support.sortdict(z)
>>> def h(j=1, a=2, h=3):
... print j, a, h
Argument list examples
>>> f()
() {}
>>> f(1)
(1,) {}
>>> f(1, 2)
(1, 2) {}
>>> f(1, 2, 3)
(1, 2, 3) {}
>>> f(1, 2, 3, *(4, 5))
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *[4, 5])
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *UserList([4, 5]))
(1, 2, 3, 4, 5) {}
Here we add keyword arguments
>>> f(1, 2, 3, **{'a':4, 'b':5})
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *[4, 5], **{'a':6, 'b':7})
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b': 9})
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
>>> f(1, 2, 3, **UserDict(a=4, b=5))
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *(4, 5), **UserDict(a=6, b=7))
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **UserDict(a=8, b=9))
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
Examples with invalid arguments (TypeErrors). We're also testing the function
names in the exception messages.
Verify clearing of SF bug #733667
>>> e(c=4)
Traceback (most recent call last):
...
TypeError: e() got an unexpected keyword argument 'c'
>>> g()
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*())
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*(), **{})
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(1)
1 () {}
>>> g(1, 2)
1 (2,) {}
>>> g(1, 2, 3)
1 (2, 3) {}
>>> g(1, 2, 3, *(4, 5))
1 (2, 3, 4, 5) {}
>>> class Nothing: pass
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be an iterable, not instance
>>> class Nothing:
... def __len__(self): return 5
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be an iterable, not instance
>>> class Nothing():
... def __len__(self): return 5
... def __getitem__(self, i):
... if i<3: return i
... else: raise IndexError(i)
...
>>> g(*Nothing())
0 (1, 2) {}
>>> class Nothing:
... def __init__(self): self.c = 0
... def __iter__(self): return self
... def next(self):
... if self.c == 4:
... raise StopIteration
... c = self.c
... self.c += 1
... return c
...
>>> g(*Nothing())
0 (1, 2, 3) {}
Check for issue #4806: Does a TypeError in a generator get propagated with the
right error message?
>>> def broken(): raise TypeError("myerror")
...
>>> g(*(broken() for i in range(1)))
Traceback (most recent call last):
...
TypeError: myerror
Make sure that the function doesn't stomp the dictionary
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = d.copy()
>>> g(1, d=4, **d)
1 () {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> d == d2
True
What about willful misconduct?
>>> def saboteur(**kw):
... kw['x'] = 'm'
... return kw
>>> d = {}
>>> kw = saboteur(a=1, **d)
>>> d
{}
>>> g(1, 2, 3, **{'x': 4, 'y': 5})
Traceback (most recent call last):
...
TypeError: g() got multiple values for keyword argument 'x'
>>> f(**{1:2})
Traceback (most recent call last):
...
TypeError: f() keywords must be strings
>>> h(**{'e': 2})
Traceback (most recent call last):
...
TypeError: h() got an unexpected keyword argument 'e'
>>> h(*h)
Traceback (most recent call last):
...
TypeError: h() argument after * must be an iterable, not function
>>> dir(*h)
Traceback (most recent call last):
...
TypeError: dir() argument after * must be an iterable, not function
>>> None(*h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after * must be an iterable, \
not function
>>> h(**h)
Traceback (most recent call last):
...
TypeError: h() argument after ** must be a mapping, not function
>>> dir(**h)
Traceback (most recent call last):
...
TypeError: dir() argument after ** must be a mapping, not function
>>> None(**h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after ** must be a mapping, \
not function
>>> dir(b=1, **{'b': 1})
Traceback (most recent call last):
...
TypeError: dir() got multiple values for keyword argument 'b'
Another helper function
>>> def f2(*a, **b):
... return a, b
>>> d = {}
>>> for i in xrange(512):
... key = 'k%d' % i
... d[key] = i
>>> a, b = f2(1, *(2,3), **d)
>>> len(a), len(b), b == d
(3, 512, True)
>>> class Foo:
... def method(self, arg1, arg2):
... return arg1+arg2
>>> x = Foo()
>>> Foo.method(*(x, 1, 2))
3
>>> Foo.method(x, *(1, 2))
3
>>> Foo.method(*(1, 2, 3))
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
>>> Foo.method(1, *[2, 3])
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
A PyCFunction that takes only positional parameters should allow an
empty keyword dictionary to pass without a complaint, but raise a
TypeError if the dictionary is not empty
>>> try:
... silence = id(1, *{})
... True
... except:
... False
True
>>> id(1, **{'foo': 1})
Traceback (most recent call last):
...
TypeError: id() takes no keyword arguments
A corner case of keyword dictionary items being deleted during
the function call setup. See <http://bugs.python.org/issue2016>.
>>> class Name(str):
... def __eq__(self, other):
... try:
... del x[self]
... except KeyError:
... pass
... return str.__eq__(self, other)
... def __hash__(self):
... return str.__hash__(self)
>>> x = {Name("a"):1, Name("b"):2}
>>> def f(a, b):
... print a,b
>>> f(**x)
1 2
An obscure message:
>>> def f(a, b):
... pass
>>> f(b=1)
Traceback (most recent call last):
...
TypeError: f() takes exactly 2 arguments (1 given)
The number of arguments passed in includes keywords:
>>> def f(a):
... pass
>>> f(6, a=4, *(1, 2, 3))
Traceback (most recent call last):
...
TypeError: f() takes exactly 1 argument (5 given)
"""
import unittest
import sys
from test import test_support
class ExtCallTest(unittest.TestCase):
def test_unicode_keywords(self):
def f(a):
return a
self.assertEqual(f(**{u'a': 4}), 4)
self.assertRaises(TypeError, f, **{u'stören': 4})
self.assertRaises(TypeError, f, **{u'someLongString':2})
try:
f(a=4, **{u'a': 4})
except TypeError:
pass
else:
self.fail("duplicate arguments didn't raise")
def test_main():
test_support.run_doctest(sys.modules[__name__], True)
test_support.run_unittest(ExtCallTest)
if __name__ == '__main__':
test_main()
|
wang1352083/pythontool
|
python-2.7.12-lib/test/test_extcall.py
|
Python
|
mit
| 7,975
|
from asposebarcode import Settings
from com.aspose.barcoderecognition import BarCodeReadType
from com.aspose.barcoderecognition import BarCodeReader
class GetBarcodeRecognitionQuality:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithBarcodeRecognition/AdvancedBarcodeRecognitionFeatures/GetBarcodeRecognitionQuality/'
img = dataDir + "barcode.jpg"
# initialize barcode reader
barcode_reader_type = BarCodeReadType
reader = BarCodeReader(img, barcode_reader_type.Code39Standard)
# Call read method
while reader.read():
print "Barcode CodeText: " + reader.getCodeText()
print " Barcode Type: "
print reader.getReadType()
percent = reader.getRecognitionQuality()
print "Barcode Quality Percentage: "
print percent
# Close reader
reader.close()
if __name__ == '__main__':
GetBarcodeRecognitionQuality()
|
asposebarcode/Aspose_BarCode_Java
|
Plugins/Aspose.BarCode Java for Jython/asposebarcode/WorkingWithBarcodeRecognition/AdvancedBarcodeRecognitionFeatures/GetBarcodeRecognitionQuality.py
|
Python
|
mit
| 998
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_profile', '0004_auto_20150802_0153'),
]
operations = [
migrations.RemoveField(
model_name='imagerprofile',
name='name',
),
migrations.AddField(
model_name='imagerprofile',
name='nickname',
field=models.CharField(max_length=128, null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='address',
field=models.TextField(null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='camera',
field=models.CharField(help_text=b'What is the make and model of your camera?', max_length=128, null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='photography_type',
field=models.CharField(blank=True, max_length=64, null=True, help_text=b'What is your photography type?', choices=[(b'H', b'Hobbist'), (b'A', b'Abstract'), (b'B', b'Black and White'), (b'P', b'Panorama'), (b'J', b'Journalism')]),
),
migrations.AlterField(
model_name='imagerprofile',
name='website_url',
field=models.URLField(null=True, blank=True),
),
]
|
tpeek/bike_safety
|
imagersite/imager_profile/migrations/0005_auto_20150802_0303.py
|
Python
|
mit
| 1,491
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# hello.py
# A Hello World program using Tkinter package.
#
# Author: Billy Wilson Arante
# Created: 2016/10/29 EDT
#
# Attribution: http://effbot.org/tkinterbook/tkinter-hello-tkinter.htm
from Tkinter import *
def main():
"""Main"""
root = Tk()
label = Label(root, text="Hello, world!")
label.pack()
root.mainloop()
if __name__ == "__main__":
# Executes only if run as script
main()
|
arantebillywilson/python-snippets
|
py2/tkinter/hello.py
|
Python
|
mit
| 465
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_marooned_pirate_tran_m.iff"
result.attribute_template_id = 9
result.stfName("npc_name","trandoshan_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_marooned_pirate_tran_m.py
|
Python
|
mit
| 460
|
"""Query the switch for configured queues on a port."""
# System imports
# Third-party imports
# Local source tree imports
from pyof.foundation.base import GenericMessage
from pyof.foundation.basic_types import Pad, UBInt32
from pyof.v0x04.common.header import Header, Type
from pyof.v0x04.common.port import PortNo
__all__ = ('QueueGetConfigRequest',)
class QueueGetConfigRequest(GenericMessage):
"""Query structure for configured queues on a port."""
#: Openflow :class:`~pyof.v0x04.common.header.Header`.
header = Header(message_type=Type.OFPT_GET_CONFIG_REQUEST)
#: Port to be queried. Should refer to a valid physical port
#: (i.e. < OFPP_MAX), or OFPP_ANY to request all configured queues.
port = UBInt32(enum_ref=PortNo)
pad = Pad(4)
def __init__(self, xid=None, port=None):
"""Create a QueueGetConfigRequest with the optional parameters below.
Args:
xid (int): xid of OpenFlow header
port (:class:`~.common.port.PortNo`): Target port for the query.
"""
super().__init__(xid)
self.port = port
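# Usage sketch (assuming GenericMessage's pack() from python-openflow):
#   request = QueueGetConfigRequest(xid=1, port=PortNo.OFPP_ANY)
#   data = request.pack()  # wire-format bytes to send to the switch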
|
kytos/python-openflow
|
pyof/v0x04/controller2switch/queue_get_config_request.py
|
Python
|
mit
| 1,105
|
# encoding: utf-8
" This sub-module provides 'sequence awareness' for blessed."
__author__ = 'Jeff Quast <contact@jeffquast.com>'
__license__ = 'MIT'
__all__ = ('init_sequence_patterns', 'Sequence', 'SequenceTextWrapper',)
# built-ins
import functools
import textwrap
import warnings
import math
import sys
import re
# local
from ._binterms import binary_terminals as _BINTERM_UNSUPPORTED
# 3rd-party
import wcwidth # https://github.com/jquast/wcwidth
_BINTERM_UNSUPPORTED_MSG = (
u"Terminal kind {0!r} contains binary-packed capabilities, blessed "
u"is likely to fail to measure the length of its sequences.")
if sys.version_info[0] == 3:
text_type = str
else:
text_type = unicode # noqa
def _merge_sequences(inp):
"""Merge a list of input sequence patterns for use in a regular expression.
Order by length (a full sequence takes precedence over a subset),
and exclude any empty (u'') sequences.
"""
return sorted(list(filter(None, inp)), key=len, reverse=True)
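# For example (a sketch): _merge_sequences([u'\x1b[m', u'', u'\x1b[0m'])
# returns [u'\x1b[0m', u'\x1b[m'] -- longest pattern first, empties dropped.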
def _build_numeric_capability(term, cap, optional=False,
base_num=99, nparams=1):
""" Build regexp from capabilities having matching numeric
parameter contained within termcap value: n->(\d+).
"""
_cap = getattr(term, cap)
opt = '?' if optional else ''
if _cap:
args = (base_num,) * nparams
cap_re = re.escape(_cap(*args))
for num in range(base_num - 1, base_num + 2):
# search for matching ascii, n-1 through n+1
if str(num) in cap_re:
# modify & return n to matching digit expression
cap_re = cap_re.replace(str(num), r'(\d+)%s' % (opt,))
return cap_re
warnings.warn('Unknown parameter in %r (%r, %r)' % (cap, _cap, cap_re))
return None # no such capability
def _build_any_numeric_capability(term, cap, num=99, nparams=1):
""" Build regexp from capabilities having *any* digit parameters
(substitute matching \d with pattern \d and return).
"""
_cap = getattr(term, cap)
if _cap:
cap_re = re.escape(_cap(*((num,) * nparams)))
cap_re = re.sub('(\d+)', r'(\d+)', cap_re)
if r'(\d+)' in cap_re:
return cap_re
warnings.warn('Missing numerics in %r, %r' % (cap, cap_re))
return None # no such capability
def get_movement_sequence_patterns(term):
""" Build and return set of regexp for capabilities of ``term`` known
to cause movement.
"""
bnc = functools.partial(_build_numeric_capability, term)
return set([
# carriage_return
re.escape(term.cr),
# column_address: Horizontal position, absolute
bnc(cap='hpa'),
# row_address: Vertical position #1 absolute
bnc(cap='vpa'),
# cursor_address: Move to row #1 columns #2
bnc(cap='cup', nparams=2),
# cursor_down: Down one line
re.escape(term.cud1),
# cursor_home: Home cursor (if no cup)
re.escape(term.home),
# cursor_left: Move left one space
re.escape(term.cub1),
# cursor_right: Non-destructive space (move right one space)
re.escape(term.cuf1),
# cursor_up: Up one line
re.escape(term.cuu1),
# param_down_cursor: Down #1 lines
bnc(cap='cud', optional=True),
# restore_cursor: Restore cursor to position of last save_cursor
re.escape(term.rc),
# clear_screen: clear screen and home cursor
re.escape(term.clear),
# enter/exit_fullscreen: switch to alternate screen buffer
re.escape(term.enter_fullscreen),
re.escape(term.exit_fullscreen),
# forward cursor
term._cuf,
# backward cursor
term._cub,
])
def get_wontmove_sequence_patterns(term):
""" Build and return set of regexp for capabilities of ``term`` known
not to cause any movement.
"""
bnc = functools.partial(_build_numeric_capability, term)
bna = functools.partial(_build_any_numeric_capability, term)
return list([
# print_screen: Print contents of screen
re.escape(term.mc0),
# prtr_off: Turn off printer
re.escape(term.mc4),
# prtr_on: Turn on printer
re.escape(term.mc5),
# save_cursor: Save current cursor position (P)
re.escape(term.sc),
# set_tab: Set a tab in every row, current columns
re.escape(term.hts),
# enter_bold_mode: Turn on bold (extra bright) mode
re.escape(term.bold),
# enter_standout_mode
re.escape(term.standout),
# enter_subscript_mode
re.escape(term.subscript),
# enter_superscript_mode
re.escape(term.superscript),
# enter_underline_mode: Begin underline mode
re.escape(term.underline),
# enter_blink_mode: Turn on blinking
re.escape(term.blink),
# enter_dim_mode: Turn on half-bright mode
re.escape(term.dim),
# cursor_invisible: Make cursor invisible
re.escape(term.civis),
# cursor_visible: Make cursor very visible
re.escape(term.cvvis),
# cursor_normal: Make cursor appear normal (undo civis/cvvis)
re.escape(term.cnorm),
# clear_all_tabs: Clear all tab stops
re.escape(term.tbc),
# change_scroll_region: Change region to line #1 to line #2
bnc(cap='csr', nparams=2),
# clr_bol: Clear to beginning of line
re.escape(term.el1),
# clr_eol: Clear to end of line
re.escape(term.el),
# clr_eos: Clear to end of screen
re.escape(term.clear_eos),
# delete_character: Delete character
re.escape(term.dch1),
# delete_line: Delete line (P*)
re.escape(term.dl1),
# erase_chars: Erase #1 characters
bnc(cap='ech'),
# insert_line: Insert line (P*)
re.escape(term.il1),
# parm_dch: Delete #1 characters
bnc(cap='dch'),
# parm_delete_line: Delete #1 lines
bnc(cap='dl'),
# exit_alt_charset_mode: End alternate character set (P)
re.escape(term.rmacs),
# exit_am_mode: Turn off automatic margins
re.escape(term.rmam),
# exit_attribute_mode: Turn off all attributes
re.escape(term.sgr0),
# exit_ca_mode: Strings to end programs using cup
re.escape(term.rmcup),
# exit_insert_mode: Exit insert mode
re.escape(term.rmir),
# exit_standout_mode: Exit standout mode
re.escape(term.rmso),
# exit_underline_mode: Exit underline mode
re.escape(term.rmul),
# flash_hook: Flash switch hook
re.escape(term.hook),
# flash_screen: Visible bell (may not move cursor)
re.escape(term.flash),
# keypad_local: Leave 'keyboard_transmit' mode
re.escape(term.rmkx),
# keypad_xmit: Enter 'keyboard_transmit' mode
re.escape(term.smkx),
# meta_off: Turn off meta mode
re.escape(term.rmm),
# meta_on: Turn on meta mode (8th-bit on)
re.escape(term.smm),
# orig_pair: Set default pair to its original value
re.escape(term.op),
# parm_ich: Insert #1 characters
bnc(cap='ich'),
# parm_index: Scroll forward #1
bnc(cap='indn'),
# parm_insert_line: Insert #1 lines
bnc(cap='il'),
# erase_chars: Erase #1 characters
bnc(cap='ech'),
# parm_rindex: Scroll back #1 lines
bnc(cap='rin'),
# parm_up_cursor: Up #1 lines
bnc(cap='cuu'),
# scroll_forward: Scroll text up (P)
re.escape(term.ind),
# scroll_reverse: Scroll text down (P)
re.escape(term.rev),
# tab: Tab to next 8-space hardware tab stop
re.escape(term.ht),
# set_a_background: Set background color to #1, using ANSI escape
bna(cap='setab', num=1),
bna(cap='setab', num=(term.number_of_colors - 1)),
# set_a_foreground: Set foreground color to #1, using ANSI escape
bna(cap='setaf', num=1),
bna(cap='setaf', num=(term.number_of_colors - 1)),
] + [
# set_attributes: Define video attributes #1-#9 (PG9)
# ( not *exactly* legal, being extra forgiving. )
bna(cap='sgr', nparams=_num) for _num in range(1, 10)
# reset_{1,2,3}string: Reset string
] + list(map(re.escape, (term.r1, term.r2, term.r3,))))
def init_sequence_patterns(term):
"""Given a Terminal instance, ``term``, this function processes
and parses several known terminal capabilities, and builds and
returns a dictionary database of regular expressions, which may
be re-attached to the terminal by attributes of the same key-name:
``_re_will_move``
any sequence matching this pattern will cause the terminal
cursor to move (such as *term.home*).
``_re_wont_move``
any sequence matching this pattern will not cause the cursor
to move (such as *term.bold*).
``_re_cuf``
regular expression that matches term.cuf(N) (move N characters forward),
    or None if terminal is without cuf sequence.
``_cuf1``
*term.cuf1* sequence (cursor forward 1 character) as a static value.
``_re_cub``
regular expression that matches term.cub(N) (move N characters backward),
or None if terminal is without cub sequence.
``_cub1``
    *term.cub1* sequence (cursor backward 1 character) as a static value.
These attributes make it possible to perform introspection on strings
containing sequences generated by this terminal, to determine the
printable length of a string.
"""
if term.kind in _BINTERM_UNSUPPORTED:
warnings.warn(_BINTERM_UNSUPPORTED_MSG.format(term.kind))
# Build will_move, a list of terminal capabilities that have
# indeterminate effects on the terminal cursor position.
_will_move = set()
if term.does_styling:
_will_move = _merge_sequences(get_movement_sequence_patterns(term))
# Build wont_move, a list of terminal capabilities that mainly affect
# video attributes, for use with measure_length().
    _wont_move = []
if term.does_styling:
_wont_move = _merge_sequences(get_wontmove_sequence_patterns(term))
_wont_move += [
        # some last-ditch match efforts: xterm and aixterm are going
        # to throw \x1b(B and other oddities all around, so, when given
        # input such as ansi art (see the test using wall.ans),
        # there's no reason a vt220 terminal shouldn't be able to recognize
        # blue_on_red, even if it didn't cause it to be generated. these
        # are final "ok, I will match this anyway" patterns.
re.escape(u'\x1b') + r'\[(\d+)m',
re.escape(u'\x1b') + r'\[(\d+)\;(\d+)m',
re.escape(u'\x1b') + r'\[(\d+)\;(\d+)\;(\d+)m',
re.escape(u'\x1b') + r'\[(\d+)\;(\d+)\;(\d+)\;(\d+)m',
re.escape(u'\x1b(B'),
]
# compile as regular expressions, OR'd.
_re_will_move = re.compile('(%s)' % ('|'.join(_will_move)))
_re_wont_move = re.compile('(%s)' % ('|'.join(_wont_move)))
# static pattern matching for horizontal_distance(ucs, term)
bnc = functools.partial(_build_numeric_capability, term)
# parm_right_cursor: Move #1 characters to the right
_cuf = bnc(cap='cuf', optional=True)
_re_cuf = re.compile(_cuf) if _cuf else None
# cursor_right: Non-destructive space (move right one space)
_cuf1 = term.cuf1
# parm_left_cursor: Move #1 characters to the left
_cub = bnc(cap='cub', optional=True)
_re_cub = re.compile(_cub) if _cub else None
# cursor_left: Move left one space
_cub1 = term.cub1
return {'_re_will_move': _re_will_move,
'_re_wont_move': _re_wont_move,
'_re_cuf': _re_cuf,
'_re_cub': _re_cub,
'_cuf1': _cuf1,
'_cub1': _cub1, }
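# Usage sketch: the mapping returned above is meant to be re-attached to the
# terminal under the same attribute names, for example:
#
#     for attr, value in init_sequence_patterns(term).items():
#         setattr(term, attr, value)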
class SequenceTextWrapper(textwrap.TextWrapper):
def __init__(self, width, term, **kwargs):
self.term = term
textwrap.TextWrapper.__init__(self, width, **kwargs)
def _wrap_chunks(self, chunks):
"""
escape-sequence aware variant of _wrap_chunks. Though
movement sequences, such as term.left() are certainly not
honored, sequences such as term.bold() are, and are not
broken mid-sequence.
"""
lines = []
if self.width <= 0 or not isinstance(self.width, int):
raise ValueError("invalid width %r(%s) (must be integer > 0)" % (
self.width, type(self.width)))
term = self.term
drop_whitespace = not hasattr(self, 'drop_whitespace'
) or self.drop_whitespace
chunks.reverse()
while chunks:
cur_line = []
cur_len = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
width = self.width - len(indent)
if drop_whitespace and (
Sequence(chunks[-1], term).strip() == '' and lines):
del chunks[-1]
while chunks:
chunk_len = Sequence(chunks[-1], term).length()
if cur_len + chunk_len <= width:
cur_line.append(chunks.pop())
cur_len += chunk_len
else:
break
if chunks and Sequence(chunks[-1], term).length() > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
if drop_whitespace and (
cur_line and Sequence(cur_line[-1], term).strip() == ''):
del cur_line[-1]
if cur_line:
lines.append(indent + u''.join(cur_line))
return lines
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
term = self.term
chunk = reversed_chunks[-1]
nxt = 0
for idx in range(0, len(chunk)):
if idx == nxt:
# at sequence, point beyond it,
nxt = idx + measure_length(chunk[idx:], term)
if nxt <= idx:
# point beyond next sequence, if any,
# otherwise point to next character
nxt = idx + measure_length(chunk[idx:], term) + 1
if Sequence(chunk[:nxt], term).length() > space_left:
break
else:
# our text ends with a sequence, such as in text
                # u'!\x1b(B\x1b[m', set index at end (nxt)
idx = nxt
cur_line.append(chunk[:idx])
reversed_chunks[-1] = chunk[idx:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
SequenceTextWrapper.__doc__ = textwrap.TextWrapper.__doc__
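# Usage sketch (assuming ``term`` is a blessed Terminal instance; wrap() is
# inherited from textwrap.TextWrapper):
#
#     wrapper = SequenceTextWrapper(width=40, term=term)
#     lines = wrapper.wrap(term.bold(u'a long, styled sentence to fold'))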
class Sequence(text_type):
"""
This unicode-derived class understands the effect of escape sequences
of printable length, allowing a properly implemented .rjust(), .ljust(),
.center(), and .len()
"""
def __new__(cls, sequence_text, term):
"""Sequence(sequence_text, term) -> unicode object
:arg sequence_text: A string containing sequences.
:arg term: Terminal instance this string was created with.
"""
new = text_type.__new__(cls, sequence_text)
new._term = term
return new
def ljust(self, width, fillchar=u' '):
"""S.ljust(width, fillchar) -> unicode
Returns string derived from unicode string ``S``, left-adjusted
by trailing whitespace padding ``fillchar``."""
rightside = fillchar * int((max(0.0, float(width - self.length())))
/ float(len(fillchar)))
return u''.join((self, rightside))
def rjust(self, width, fillchar=u' '):
"""S.rjust(width, fillchar=u'') -> unicode
Returns string derived from unicode string ``S``, right-adjusted
by leading whitespace padding ``fillchar``."""
leftside = fillchar * int((max(0.0, float(width - self.length())))
/ float(len(fillchar)))
return u''.join((leftside, self))
def center(self, width, fillchar=u' '):
"""S.center(width, fillchar=u'') -> unicode
Returns string derived from unicode string ``S``, centered
and surrounded with whitespace padding ``fillchar``."""
split = max(0.0, float(width) - self.length()) / 2
leftside = fillchar * int((max(0.0, math.floor(split)))
/ float(len(fillchar)))
rightside = fillchar * int((max(0.0, math.ceil(split)))
/ float(len(fillchar)))
return u''.join((leftside, self, rightside))
def length(self):
"""S.length() -> int
Returns printable length of unicode string ``S`` that may contain
terminal sequences.
        Although accounted for, strings containing sequences such as
        ``term.clear`` will not give accurate returns; such a sequence is
        not considered lengthy (it has a length of 0). Combining characters
        are also not considered lengthy.
Strings containing ``term.left`` or ``\b`` will cause "overstrike",
but a length less than 0 is not ever returned. So ``_\b+`` is a
length of 1 (``+``), but ``\b`` is simply a length of 0.
Some characters may consume more than one cell, mainly those CJK
Unified Ideographs (Chinese, Japanese, Korean) defined by Unicode
as half or full-width characters.
For example:
>>> from blessed import Terminal
>>> from blessed.sequences import Sequence
>>> term = Terminal()
        >>> Sequence(term.clear + term.red(u'コンニチハ'), term).length()
5
"""
# because combining characters may return -1, "clip" their length to 0.
clip = functools.partial(max, 0)
return sum(clip(wcwidth.wcwidth(w_char))
for w_char in self.strip_seqs())
def strip(self, chars=None):
"""S.strip([chars]) -> unicode
Return a copy of the string S with terminal sequences removed, and
leading and trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return self.strip_seqs().strip(chars)
def lstrip(self, chars=None):
"""S.lstrip([chars]) -> unicode
Return a copy of the string S with terminal sequences and leading
whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return self.strip_seqs().lstrip(chars)
def rstrip(self, chars=None):
"""S.rstrip([chars]) -> unicode
Return a copy of the string S with terminal sequences and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return self.strip_seqs().rstrip(chars)
def strip_seqs(self):
"""S.strip_seqs() -> unicode
Return a string without sequences for a string that contains
sequences for the Terminal with which they were created.
Where sequence ``move_right(n)`` is detected, it is replaced with
``n * u' '``, and where ``move_left()`` or ``\\b`` is detected,
those last-most characters are destroyed.
All other sequences are simply removed. An example,
>>> from blessed import Terminal
>>> from blessed.sequences import Sequence
>>> term = Terminal()
        >>> Sequence(term.clear + term.red(u'test'), term).strip_seqs()
u'test'
"""
# nxt: points to first character beyond current escape sequence.
# width: currently estimated display length.
input = self.padd()
outp = u''
nxt = 0
for idx in range(0, len(input)):
if idx == nxt:
# at sequence, point beyond it,
nxt = idx + measure_length(input[idx:], self._term)
if nxt <= idx:
# append non-sequence to outp,
outp += input[idx]
# point beyond next sequence, if any,
# otherwise point to next character
nxt = idx + measure_length(input[idx:], self._term) + 1
return outp
def padd(self):
"""S.padd() -> unicode
Make non-destructive space or backspace into destructive ones.
Where sequence ``move_right(n)`` is detected, it is replaced with
``n * u' '``. Where sequence ``move_left(n)`` or ``\\b`` is
detected, those last-most characters are destroyed.
"""
outp = u''
nxt = 0
for idx in range(0, text_type.__len__(self)):
width = horizontal_distance(self[idx:], self._term)
if width != 0:
nxt = idx + measure_length(self[idx:], self._term)
if width > 0:
outp += u' ' * width
elif width < 0:
outp = outp[:width]
if nxt <= idx:
outp += self[idx]
nxt = idx + 1
return outp
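    # padd() example (a sketch): if term.move_right(2) renders as u'\x1b[2C',
    # then Sequence(u'ab' + term.move_right(2) + u'cd', term).padd()
    # yields u'ab  cd' -- the non-destructive motion becomes two spaces.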
def measure_length(ucs, term):
"""measure_length(S, term) -> int
Returns non-zero for string ``S`` that begins with a terminal sequence,
that is: the width of the first unprintable sequence found in S. For use
as a *next* pointer to skip past sequences. If string ``S`` is not a
sequence, 0 is returned.
A sequence may be a typical terminal sequence beginning with Escape
(``\x1b``), especially a Control Sequence Initiator (``CSI``, ``\x1b[``,
    ...), or those of ``\a``, ``\b``, ``\r``, ``\n``, ``\x0e`` (shift out),
    ``\x0f`` (shift in). They do not necessarily have to begin with CSI, they
need only match the capabilities of attributes ``_re_will_move`` and
``_re_wont_move`` of terminal ``term``.
"""
# simple terminal control characters,
ctrl_seqs = u'\a\b\r\n\x0e\x0f'
if any([ucs.startswith(_ch) for _ch in ctrl_seqs]):
return 1
# known multibyte sequences,
matching_seq = term and (
term._re_will_move.match(ucs) or
term._re_wont_move.match(ucs) or
term._re_cub and term._re_cub.match(ucs) or
term._re_cuf and term._re_cuf.match(ucs)
)
if matching_seq:
start, end = matching_seq.span()
return end
# none found, must be printable!
return 0
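# For example (a sketch, assuming the patterns from init_sequence_patterns()
# have been attached to ``term``): measure_length(u'\x1b[0mtext', term)
# returns 4 (the length of the SGR reset sequence), while
# measure_length(u'text', term) returns 0 because 't' is printable.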
def termcap_distance(ucs, cap, unit, term):
"""termcap_distance(S, cap, unit, term) -> int
Match horizontal distance by simple ``cap`` capability name, ``cub1`` or
``cuf1``, with string matching the sequences identified by Terminal
instance ``term`` and a distance of ``unit`` *1* or *-1*, for right and
left, respectively.
    Otherwise, by regular expression (using dynamic regular expressions built
    from ``cub(n)`` and ``cuf(n)``). Failing that, any of the standard CSI
    sequences (``\033[C``, ``\033[D``, ``\033[nC``, ``\033[nD``).
Returns 0 if unmatched.
"""
assert cap in ('cuf', 'cub')
# match cub1(left), cuf1(right)
one = getattr(term, '_%s1' % (cap,))
if one and ucs.startswith(one):
return unit
# match cub(n), cuf(n) using regular expressions
re_pattern = getattr(term, '_re_%s' % (cap,))
_dist = re_pattern and re_pattern.match(ucs)
if _dist:
return unit * int(_dist.group(1))
return 0
def horizontal_distance(ucs, term):
"""horizontal_distance(S, term) -> int
    Returns Integer ``<n>`` in CSI sequence of form ``<ESC>[<n>C``
    (T.move_right(n)), or ``-(n)`` in sequence of form ``<ESC>[<n>D``
    (T.move_left(n)). Returns -1 for backspace (0x08), otherwise 0.
Tabstop (``\t``) cannot be correctly calculated, as the relative column
position cannot be determined: 8 is always (and, incorrectly) returned.
"""
if ucs.startswith('\b'):
return -1
elif ucs.startswith('\t'):
# As best as I can prove it, a tabstop is always 8 by default.
# Though, given that blessings is:
#
# 1. unaware of the output device's current cursor position, and
# 2. unaware of the location the callee may chose to output any
# given string,
#
# It is not possible to determine how many cells any particular
# \t would consume on the output device!
return 8
return (termcap_distance(ucs, 'cub', -1, term) or
termcap_distance(ucs, 'cuf', 1, term) or
0)
|
AccelAI/accel.ai
|
flask-aws/lib/python2.7/site-packages/blessed/sequences.py
|
Python
|
mit
| 26,038
|
# Copyright (c) 2014 Tycho Andersen
# Copyright (c) 2014 dequis
# Copyright (c) 2014-2015 Joseph Razik
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2015 reus
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module defines a widget that displays icons to launch software or
commands when clicked -- a launchbar.
Only png icon files are displayed, not xpm, because cairo doesn't support
loading xpm files.
Icons are displayed from left to right, in the order of the list.
If no icon is found for the provided name and default_icon is set to None,
the name is printed instead. If default_icon is defined, that icon is
displayed instead.
To execute a software:
 - ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')
To execute a python command in qtile, prefix it with 'qshell:':
- ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')
"""
from libqtile import bar
from libqtile.log_utils import logger
from libqtile.widget import base
import os.path
import cairocffi
from xdg.IconTheme import getIconPath
class LaunchBar(base._Widget):
"""A widget that display icons to launch the associated command
Parameters
==========
progs :
a list of tuples ``(software_name, command_to_execute, comment)``, for
example::
('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')
('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('padding', 2, 'Padding between icons'),
('default_icon', '/usr/share/icons/oxygen/256x256/mimetypes/'
         'application-x-executable.png', 'Default icon to display when an icon is not found'),
]
def __init__(self, progs=None, width=bar.CALCULATED, **config):
base._Widget.__init__(self, width, **config)
if progs is None:
progs = []
self.add_defaults(LaunchBar.defaults)
self.surfaces = {}
self.icons_files = {}
self.icons_widths = {}
self.icons_offsets = {}
        # For now, ignore the comments, but maybe one day they will be useful
self.progs = dict(enumerate([{'name': prog[0], 'cmd': prog[1],
'comment': prog[2] if len(prog) > 2 else
None} for prog in progs]))
self.progs_name = set([prog['name'] for prog in self.progs.values()])
self.length_type = bar.STATIC
self.length = 0
def _configure(self, qtile, pbar):
base._Widget._configure(self, qtile, pbar)
self.lookup_icons()
self.setup_images()
self.length = self.calculate_length()
def setup_images(self):
""" Create image structures for each icon files. """
for img_name, iconfile in self.icons_files.items():
if iconfile is None:
logger.warning(
                    'No icon found for application "%s" (%s), switching to text mode',
img_name, iconfile)
# if no icon is found and no default icon was set, we just
# print the name, based on a textbox.
textbox = base._TextBox()
textbox._configure(self.qtile, self.bar)
textbox.layout = self.drawer.textlayout(
textbox.text,
textbox.foreground,
textbox.font,
textbox.fontsize,
textbox.fontshadow,
markup=textbox.markup,
)
# the name will be displayed
textbox.text = img_name
textbox.calculate_length()
self.icons_widths[img_name] = textbox.width
self.surfaces[img_name] = textbox
continue
else:
try:
img = cairocffi.ImageSurface.create_from_png(iconfile)
except cairocffi.Error:
logger.exception('Error loading icon for application "%s" (%s)', img_name, iconfile)
return
input_width = img.get_width()
input_height = img.get_height()
sp = input_height / (self.bar.height - 4)
width = int(input_width / sp)
imgpat = cairocffi.SurfacePattern(img)
scaler = cairocffi.Matrix()
scaler.scale(sp, sp)
scaler.translate(self.padding * -1, -2)
imgpat.set_matrix(scaler)
imgpat.set_filter(cairocffi.FILTER_BEST)
self.surfaces[img_name] = imgpat
self.icons_widths[img_name] = width
def _lookup_icon(self, name):
""" Search for the icon corresponding to one command. """
self.icons_files[name] = None
# if the software_name is directly an absolute path icon file
if os.path.isabs(name):
            # name starts with '/', thus it's an absolute path
root, ext = os.path.splitext(name)
if ext == '.png':
self.icons_files[name] = name if os.path.isfile(name) else None
else:
# try to add the extension
self.icons_files[name] = name + '.png' if os.path.isfile(name + '.png') else None
else:
self.icons_files[name] = getIconPath(name)
        # no search method found an icon, so fall back to the default icon
if self.icons_files[name] is None:
self.icons_files[name] = self.default_icon
def lookup_icons(self):
""" Search for the icons corresponding to the commands to execute. """
if self.default_icon is not None:
if not os.path.isfile(self.default_icon):
# if the default icon provided is not found, switch to
# text mode
self.default_icon = None
for name in self.progs_name:
self._lookup_icon(name)
def get_icon_in_position(self, x, y):
""" Determine which icon is clicked according to its position. """
for i in self.progs:
if x < (self.icons_offsets[i] +
self.icons_widths[self.progs[i]['name']] +
self.padding / 2):
return i
def button_press(self, x, y, button):
""" Launch the associated command to the clicked icon. """
if button == 1:
icon = self.get_icon_in_position(x, y)
if icon is not None:
cmd = self.progs[icon]['cmd']
if cmd.startswith('qshell:'):
                    exec(cmd[len('qshell:'):].lstrip())
else:
self.qtile.cmd_spawn(cmd)
self.draw()
def draw(self):
""" Draw the icons in the widget. """
self.drawer.clear(self.background or self.bar.background)
xoffset = 0
for i in sorted(self.progs.keys()):
self.icons_offsets[i] = xoffset + self.padding
name = self.progs[i]['name']
icon_width = self.icons_widths[name]
self.drawer.ctx.move_to(self.offset + xoffset, icon_width)
self.drawer.clear(self.background or self.bar.background)
if isinstance(self.surfaces[name], base._TextBox):
# display the name if no icon was found and no default icon
textbox = self.surfaces[name]
textbox.layout.draw(
self.padding + textbox.actual_padding,
int((self.bar.height - textbox.layout.height) / 2.0) + 1
)
else:
# display an icon
self.drawer.ctx.set_source(self.surfaces[name])
self.drawer.ctx.paint()
self.drawer.draw(offsetx=self.offset + xoffset,
width=icon_width + self.padding)
xoffset += icon_width + self.padding
def calculate_length(self):
""" Compute the width of the widget according to each icon width. """
return sum(self.icons_widths[prg['name']] for prg in self.progs.values()) \
+ self.padding * (len(self.progs) + 1)
|
frostidaho/qtile
|
libqtile/widget/launchbar.py
|
Python
|
mit
| 9,146
|
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
DEBUG = True
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'REPLACE_ME_BEFORE_PRODUCTION'
ALLOWED_HOSTS = ['.spades.com']
AUTH_USER_MODEL = 'deck.User'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'deck',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'spades.urls'
WSGI_APPLICATION = 'spades.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'database'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'MST'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = BASE_DIR+'/media/'
MEDIA_URL = '/media/'
|
gavinmcgimpsey/deckofcards
|
spades/settings.py
|
Python
|
mit
| 1,928
|
"""Provides all the generic data related to the personal information."""
from typing import Tuple
BLOOD_GROUPS = (
"O+",
"A+",
"B+",
"AB+",
"O−",
"A−",
"B−",
"AB−",
)
GENDER_SYMBOLS: Tuple[str, str, str] = (
"♂",
"♀",
"⚲",
)
USERNAMES = [
"aaa",
"aaron",
"abandoned",
"abc",
"aberdeen",
"abilities",
"ability",
"able",
"aboriginal",
"abortion",
"about",
"above",
"abraham",
"abroad",
"abs",
"absence",
"absent",
"absolute",
"absolutely",
"absorption",
"abstract",
"abstracts",
"abu",
"abuse",
"academic",
"academics",
"academy",
"acc",
"accent",
"accept",
"acceptable",
"acceptance",
"accepted",
"accepting",
"accepts",
"access",
"accessed",
"accessibility",
"accessible",
"accessing",
"accessories",
"accessory",
"accident",
"accidents",
"accommodate",
"accommodation",
"accommodations",
"accompanied",
"accompanying",
"accomplish",
"accomplished",
"accordance",
"according",
"accordingly",
"account",
"accountability",
"accounting",
"accounts",
"accreditation",
"accredited",
"accuracy",
"accurate",
"accurately",
"accused",
"acdbentity",
"ace",
"acer",
"achieve",
"achieved",
"achievement",
"achievements",
"achieving",
"acid",
"acids",
"acknowledge",
"acknowledged",
"acm",
"acne",
"acoustic",
"acquire",
"acquired",
"acquisition",
"acquisitions",
"acre",
"acres",
"acrobat",
"across",
"acrylic",
"act",
"acting",
"action",
"actions",
"activated",
"activation",
"active",
"actively",
"activists",
"activities",
"activity",
"actor",
"actors",
"actress",
"acts",
"actual",
"actually",
"acute",
"ada",
"adam",
"adams",
"adaptation",
"adapted",
"adapter",
"adapters",
"adaptive",
"adaptor",
"add",
"added",
"addiction",
"adding",
"addition",
"additional",
"additionally",
"additions",
"address",
"addressed",
"addresses",
"addressing",
"adds",
"adelaide",
"adequate",
"adidas",
"adipex",
"adjacent",
"adjust",
"adjustable",
"adjusted",
"adjustment",
"adjustments",
"admin",
"administered",
"administration",
"administrative",
"administrator",
"administrators",
"admission",
"admissions",
"admit",
"admitted",
"adobe",
"adolescent",
"adopt",
"adopted",
"adoption",
"adrian",
"ads",
"adsl",
"adult",
"adults",
"advance",
"advanced",
"advancement",
"advances",
"advantage",
"advantages",
"adventure",
"adventures",
"adverse",
"advert",
"advertise",
"advertisement",
"advertisements",
"advertiser",
"advertisers",
"advertising",
"advice",
"advise",
"advised",
"advisor",
"advisors",
"advisory",
"advocacy",
"advocate",
"adware",
"aerial",
"aerospace",
"affair",
"affairs",
"affect",
"affected",
"affecting",
"affects",
"affiliate",
"affiliated",
"affiliates",
"affiliation",
"afford",
"affordable",
"afghanistan",
"afraid",
"africa",
"african",
"after",
"afternoon",
"afterwards",
"again",
"against",
"age",
"aged",
"agencies",
"agency",
"agenda",
"agent",
"agents",
"ages",
"aggregate",
"aggressive",
"aging",
"ago",
"agree",
"agreed",
"agreement",
"agreements",
"agrees",
"agricultural",
"agriculture",
"ahead",
"aid",
"aids",
"aim",
"aimed",
"aims",
"air",
"aircraft",
"airfare",
"airline",
"airlines",
"airplane",
"airport",
"airports",
"aka",
"ala",
"alabama",
"alan",
"alarm",
"alaska",
"albania",
"albany",
"albert",
"alberta",
"album",
"albums",
"albuquerque",
"alcohol",
"alert",
"alerts",
"alex",
"alexander",
"alexandria",
"alfred",
"algebra",
"algeria",
"algorithm",
"algorithms",
"ali",
"alias",
"alice",
"alien",
"align",
"alignment",
"alike",
"alive",
"all",
"allah",
"allan",
"alleged",
"allen",
"allergy",
"alliance",
"allied",
"allocated",
"allocation",
"allow",
"allowance",
"allowed",
"allowing",
"allows",
"alloy",
"almost",
"alone",
"along",
"alot",
"alpha",
"alphabetical",
"alpine",
"already",
"also",
"alt",
"alter",
"altered",
"alternate",
"alternative",
"alternatively",
"alternatives",
"although",
"alto",
"aluminium",
"aluminum",
"alumni",
"always",
"amanda",
"amateur",
"amazing",
"amazon",
"ambassador",
"amber",
"ambien",
"ambient",
"amd",
"amend",
"amended",
"amendment",
"amendments",
"amenities",
"america",
"american",
"americans",
"americas",
"amino",
"among",
"amongst",
"amount",
"amounts",
"amp",
"ampland",
"amplifier",
"amsterdam",
"amy",
"ana",
"anaheim",
"analog",
"analysis",
"analyst",
"analysts",
"analytical",
"analyze",
"analyzed",
"analyzes",
"anatomy",
"anchor",
"ancient",
"and",
"andale",
"anderson",
"andorra",
"andrea",
"andreas",
"andrew",
"andrews",
"andy",
"angel",
"angela",
"angeles",
"angels",
"anger",
"angle",
"angola",
"angry",
"animal",
"animals",
"animated",
"animation",
"anime",
"ann",
"anna",
"anne",
"annex",
"annie",
"anniversary",
"annotated",
"annotation",
"announce",
"announced",
"announcement",
"announcements",
"announces",
"annoying",
"annual",
"annually",
"anonymous",
"another",
"answer",
"answered",
"answering",
"answers",
"ant",
"antarctica",
"antenna",
"anthony",
"anthropology",
"anti",
"antibodies",
"antibody",
"anticipated",
"antigua",
"antique",
"antiques",
"antivirus",
"antonio",
"anxiety",
"any",
"anybody",
"anymore",
"anyone",
"anything",
"anytime",
"anyway",
"anywhere",
"aol",
"apache",
"apart",
"apartment",
"apartments",
"api",
"apnic",
"apollo",
"app",
"apparatus",
"apparel",
"apparent",
"apparently",
"appeal",
"appeals",
"appear",
"appearance",
"appeared",
"appearing",
"appears",
"appendix",
"apple",
"appliance",
"appliances",
"applicable",
"applicant",
"applicants",
"application",
"applications",
"applied",
"applies",
"apply",
"applying",
"appointed",
"appointment",
"appointments",
"appraisal",
"appreciate",
"appreciated",
"appreciation",
"approach",
"approaches",
"appropriate",
"appropriations",
"approval",
"approve",
"approved",
"approx",
"approximate",
"approximately",
"apps",
"apr",
"april",
"apt",
"aqua",
"aquarium",
"aquatic",
"arab",
"arabia",
"arabic",
"arbitrary",
"arbitration",
"arbor",
"arc",
"arcade",
"arch",
"architect",
"architects",
"architectural",
"architecture",
"archive",
"archived",
"archives",
"arctic",
"are",
"area",
"areas",
"arena",
"arg",
"argentina",
"argue",
"argued",
"argument",
"arguments",
"arise",
"arising",
"arizona",
"arkansas",
"arlington",
"arm",
"armed",
"armenia",
"armor",
"arms",
"armstrong",
"army",
"arnold",
"around",
"arrange",
"arranged",
"arrangement",
"arrangements",
"array",
"arrest",
"arrested",
"arrival",
"arrivals",
"arrive",
"arrived",
"arrives",
"arrow",
"art",
"arthritis",
"arthur",
"article",
"articles",
"artificial",
"artist",
"artistic",
"artists",
"arts",
"artwork",
"aruba",
"asbestos",
"ascii",
"ash",
"ashley",
"asia",
"asian",
"aside",
"asin",
"ask",
"asked",
"asking",
"asks",
"asn",
"asp",
"aspect",
"aspects",
"assault",
"assembled",
"assembly",
"assess",
"assessed",
"assessing",
"assessment",
"assessments",
"asset",
"assets",
"assign",
"assigned",
"assignment",
"assignments",
"assist",
"assistance",
"assistant",
"assisted",
"assists",
"associate",
"associated",
"associates",
"association",
"associations",
"assume",
"assumed",
"assumes",
"assuming",
"assumption",
"assumptions",
"assurance",
"assure",
"assured",
"asthma",
"astrology",
"astronomy",
"asus",
"asylum",
"ata",
"ate",
"athens",
"athletes",
"athletic",
"athletics",
"ati",
"atlanta",
"atlantic",
"atlas",
"atm",
"atmosphere",
"atmospheric",
"atom",
"atomic",
"attach",
"attached",
"attachment",
"attachments",
"attack",
"attacked",
"attacks",
"attempt",
"attempted",
"attempting",
"attempts",
"attend",
"attendance",
"attended",
"attending",
"attention",
"attitude",
"attitudes",
"attorney",
"attorneys",
"attract",
"attraction",
"attractions",
"attractive",
"attribute",
"attributes",
"auburn",
"auckland",
"auction",
"auctions",
"aud",
"audi",
"audience",
"audio",
"audit",
"auditor",
"aug",
"august",
"aurora",
"aus",
"austin",
"australia",
"australian",
"austria",
"authentic",
"authentication",
"author",
"authorities",
"authority",
"authorization",
"authorized",
"authors",
"auto",
"automated",
"automatic",
"automatically",
"automation",
"automobile",
"automobiles",
"automotive",
"autos",
"autumn",
"availability",
"available",
"avatar",
"ave",
"avenue",
"average",
"avg",
"avi",
"aviation",
"avoid",
"avoiding",
"avon",
"award",
"awarded",
"awards",
"aware",
"awareness",
"away",
"awesome",
"awful",
"axis",
"aye",
"azerbaijan",
"babe",
"babes",
"babies",
"baby",
"bachelor",
"back",
"backed",
"background",
"backgrounds",
"backing",
"backup",
"bacon",
"bacteria",
"bacterial",
"bad",
"badge",
"badly",
"bag",
"baghdad",
"bags",
"bahamas",
"bahrain",
"bailey",
"baker",
"baking",
"balance",
"balanced",
"bald",
"bali",
"ball",
"ballet",
"balloon",
"ballot",
"baltimore",
"ban",
"banana",
"band",
"bands",
"bandwidth",
"bang",
"bangkok",
"bangladesh",
"bank",
"banking",
"bankruptcy",
"banks",
"banned",
"banner",
"banners",
"baptist",
"bar",
"barbados",
"barbara",
"barbie",
"barcelona",
"bare",
"barely",
"bargain",
"bargains",
"barn",
"barnes",
"barrel",
"barrier",
"barriers",
"barry",
"bars",
"base",
"baseball",
"based",
"baseline",
"basement",
"basename",
"bases",
"basic",
"basically",
"basics",
"basin",
"basis",
"basket",
"basketball",
"baskets",
"bass",
"bat",
"batch",
"bath",
"bathroom",
"bathrooms",
"baths",
"batman",
"batteries",
"battery",
"battle",
"battlefield",
"bay",
"bbc",
"bbs",
"beach",
"beaches",
"beads",
"beam",
"bean",
"beans",
"bear",
"bearing",
"bears",
"beast",
"beastality",
"beat",
"beatles",
"beats",
"beautiful",
"beautifully",
"beauty",
"beaver",
"became",
"because",
"become",
"becomes",
"becoming",
"bed",
"bedding",
"bedford",
"bedroom",
"bedrooms",
"beds",
"bee",
"beef",
"been",
"beer",
"before",
"began",
"begin",
"beginner",
"beginners",
"beginning",
"begins",
"begun",
"behalf",
"behavior",
"behavioral",
"behind",
"beijing",
"being",
"beings",
"belarus",
"belfast",
"belgium",
"belief",
"beliefs",
"believe",
"believed",
"believes",
"belize",
"belkin",
"bell",
"belle",
"belly",
"belong",
"belongs",
"below",
"belt",
"belts",
"ben",
"bench",
"benchmark",
"bend",
"beneath",
"beneficial",
"benefit",
"benefits",
"benjamin",
"bennett",
"bent",
"benz",
"berkeley",
"berlin",
"bermuda",
"bernard",
"berry",
"beside",
"besides",
"best",
"bestsellers",
"bet",
"beta",
"beth",
"better",
"betting",
"betty",
"between",
"beverage",
"beverages",
"beverly",
"beyond",
"bhutan",
"bias",
"bible",
"biblical",
"bibliographic",
"bibliography",
"bicycle",
"bid",
"bidder",
"bidding",
"bids",
"big",
"bigger",
"biggest",
"bike",
"bikes",
"bikini",
"bill",
"billing",
"billion",
"bills",
"billy",
"bin",
"binary",
"bind",
"binding",
"bingo",
"bio",
"biodiversity",
"biographies",
"biography",
"biol",
"biological",
"biology",
"bios",
"biotechnology",
"bird",
"birds",
"birmingham",
"birth",
"birthday",
"bishop",
"bit",
"bite",
"bits",
"biz",
"bizarre",
"bizrate",
"black",
"blackberry",
"blackjack",
"blacks",
"blade",
"blades",
"blah",
"blair",
"blake",
"blame",
"blank",
"blanket",
"blast",
"bleeding",
"blend",
"bless",
"blessed",
"blind",
"blink",
"block",
"blocked",
"blocking",
"blocks",
"blog",
"blogger",
"bloggers",
"blogging",
"blogs",
"blond",
"blonde",
"blood",
"bloom",
"bloomberg",
"blow",
"blowing",
"blue",
"blues",
"bluetooth",
"blvd",
"bmw",
"board",
"boards",
"boat",
"boating",
"boats",
"bob",
"bobby",
"boc",
"bodies",
"body",
"bold",
"bolivia",
"bolt",
"bomb",
"bon",
"bond",
"bonds",
"bone",
"bones",
"bonus",
"book",
"booking",
"bookings",
"bookmark",
"bookmarks",
"books",
"bookstore",
"bool",
"boolean",
"boom",
"boost",
"boot",
"booth",
"boots",
"booty",
"border",
"borders",
"bored",
"boring",
"born",
"borough",
"bosnia",
"boss",
"boston",
"both",
"bother",
"botswana",
"bottle",
"bottles",
"bottom",
"bought",
"boulder",
"boulevard",
"bound",
"boundaries",
"boundary",
"bouquet",
"boutique",
"bow",
"bowl",
"bowling",
"box",
"boxed",
"boxes",
"boxing",
"boy",
"boys",
"bra",
"bracelet",
"bracelets",
"bracket",
"brad",
"bradford",
"bradley",
"brain",
"brake",
"brakes",
"branch",
"branches",
"brand",
"brandon",
"brands",
"bras",
"brass",
"brave",
"brazil",
"brazilian",
"breach",
"bread",
"break",
"breakdown",
"breakfast",
"breaking",
"breaks",
"breast",
"breath",
"breathing",
"breed",
"breeding",
"breeds",
"brian",
"brick",
"bridal",
"bride",
"bridge",
"bridges",
"brief",
"briefing",
"briefly",
"briefs",
"bright",
"brighton",
"brilliant",
"bring",
"bringing",
"brings",
"brisbane",
"bristol",
"britain",
"britannica",
"british",
"britney",
"broad",
"broadband",
"broadcast",
"broadcasting",
"broader",
"broadway",
"brochure",
"brochures",
"broke",
"broken",
"broker",
"brokers",
"bronze",
"brook",
"brooklyn",
"brooks",
"brother",
"brothers",
"brought",
"brown",
"browse",
"browser",
"browsers",
"browsing",
"bruce",
"brunei",
"brunette",
"brunswick",
"brush",
"brussels",
"brutal",
"bryan",
"bryant",
"bubble",
"buck",
"bucks",
"budapest",
"buddy",
"budget",
"budgets",
"buf",
"buffalo",
"buffer",
"bufing",
"bug",
"bugs",
"build",
"builder",
"builders",
"building",
"buildings",
"builds",
"built",
"bulgaria",
"bulgarian",
"bulk",
"bull",
"bullet",
"bulletin",
"bumper",
"bunch",
"bundle",
"bunny",
"burden",
"bureau",
"buried",
"burke",
"burlington",
"burn",
"burner",
"burning",
"burns",
"burst",
"burton",
"bus",
"buses",
"bush",
"business",
"businesses",
"busy",
"but",
"butler",
"butter",
"butterfly",
"button",
"buttons",
"butts",
"buy",
"buyer",
"buyers",
"buying",
"buys",
"buzz",
"bye",
"byte",
"bytes",
"cab",
"cabin",
"cabinet",
"cabinets",
"cable",
"cables",
"cache",
"cached",
"cad",
"cadillac",
"cafe",
"cage",
"cake",
"cakes",
"cal",
"calcium",
"calculate",
"calculated",
"calculation",
"calculations",
"calculator",
"calculators",
"calendar",
"calendars",
"calgary",
"calibration",
"california",
"call",
"called",
"calling",
"calls",
"calm",
"calvin",
"cam",
"cambodia",
"cambridge",
"camcorder",
"camcorders",
"came",
"camel",
"camera",
"cameras",
"cameron",
"cameroon",
"camp",
"campaign",
"campaigns",
"campbell",
"camping",
"camps",
"campus",
"cams",
"can",
"canada",
"canadian",
"canal",
"canberra",
"cancel",
"cancellation",
"cancelled",
"cancer",
"candidate",
"candidates",
"candle",
"candles",
"candy",
"cannon",
"canon",
"cant",
"canvas",
"canyon",
"cap",
"capabilities",
"capability",
"capable",
"capacity",
"cape",
"capital",
"capitol",
"caps",
"captain",
"capture",
"captured",
"car",
"carb",
"carbon",
"card",
"cardiac",
"cardiff",
"cardiovascular",
"cards",
"care",
"career",
"careers",
"careful",
"carefully",
"carey",
"cargo",
"caribbean",
"caring",
"carl",
"carlo",
"carlos",
"carmen",
"carnival",
"carol",
"carolina",
"caroline",
"carpet",
"carried",
"carrier",
"carriers",
"carries",
"carroll",
"carry",
"carrying",
"cars",
"cart",
"carter",
"cartoon",
"cartoons",
"cartridge",
"cartridges",
"cas",
"casa",
"case",
"cases",
"casey",
"cash",
"cashiers",
"casino",
"casinos",
"casio",
"cassette",
"cast",
"casting",
"castle",
"casual",
"cat",
"catalog",
"catalogs",
"catalogue",
"catalyst",
"catch",
"categories",
"category",
"catering",
"cathedral",
"catherine",
"catholic",
"cats",
"cattle",
"caught",
"cause",
"caused",
"causes",
"causing",
"caution",
"cave",
"cayman",
"cbs",
"ccd",
"cdna",
"cds",
"cdt",
"cedar",
"ceiling",
"celebrate",
"celebration",
"celebrities",
"celebrity",
"celebs",
"cell",
"cells",
"cellular",
"celtic",
"cement",
"cemetery",
"census",
"cent",
"center",
"centered",
"centers",
"central",
"centre",
"centres",
"cents",
"centuries",
"century",
"ceo",
"ceramic",
"ceremony",
"certain",
"certainly",
"certificate",
"certificates",
"certification",
"certified",
"cet",
"cfr",
"cgi",
"chad",
"chain",
"chains",
"chair",
"chairman",
"chairs",
"challenge",
"challenged",
"challenges",
"challenging",
"chamber",
"chambers",
"champagne",
"champion",
"champions",
"championship",
"championships",
"chan",
"chance",
"chancellor",
"chances",
"change",
"changed",
"changelog",
"changes",
"changing",
"channel",
"channels",
"chaos",
"chapel",
"chapter",
"chapters",
"char",
"character",
"characteristic",
"characteristics",
"characterization",
"characterized",
"characters",
"charge",
"charged",
"charger",
"chargers",
"charges",
"charging",
"charitable",
"charity",
"charles",
"charleston",
"charlie",
"charlotte",
"charm",
"charming",
"charms",
"chart",
"charter",
"charts",
"chase",
"chassis",
"chat",
"cheap",
"cheaper",
"cheapest",
"cheat",
"cheats",
"check",
"checked",
"checking",
"checklist",
"checkout",
"checks",
"cheers",
"cheese",
"chef",
"chelsea",
"chem",
"chemical",
"chemicals",
"chemistry",
"chen",
"cheque",
"cherry",
"chess",
"chest",
"chester",
"chevrolet",
"chevy",
"chi",
"chicago",
"chick",
"chicken",
"chicks",
"chief",
"child",
"childhood",
"children",
"childrens",
"chile",
"china",
"chinese",
"chip",
"chips",
"cho",
"chocolate",
"choice",
"choices",
"choir",
"cholesterol",
"choose",
"choosing",
"chorus",
"chose",
"chosen",
"chris",
"christ",
"christian",
"christianity",
"christians",
"christina",
"christine",
"christmas",
"christopher",
"chrome",
"chronic",
"chronicle",
"chronicles",
"chrysler",
"chubby",
"chuck",
"church",
"churches",
"cia",
"cialis",
"ciao",
"cigarette",
"cigarettes",
"cincinnati",
"cindy",
"cinema",
"cingular",
"cio",
"cir",
"circle",
"circles",
"circuit",
"circuits",
"circular",
"circulation",
"circumstances",
"circus",
"cisco",
"citation",
"citations",
"cite",
"cited",
"cities",
"citizen",
"citizens",
"citizenship",
"city",
"citysearch",
"civic",
"civil",
"civilian",
"civilization",
"claim",
"claimed",
"claims",
"claire",
"clan",
"clara",
"clarity",
"clark",
"clarke",
"class",
"classes",
"classic",
"classical",
"classics",
"classification",
"classified",
"classifieds",
"classroom",
"clause",
"clay",
"clean",
"cleaner",
"cleaners",
"cleaning",
"cleanup",
"clear",
"clearance",
"cleared",
"clearing",
"clearly",
"clerk",
"cleveland",
"click",
"clicking",
"clicks",
"client",
"clients",
"cliff",
"climate",
"climb",
"climbing",
"clinic",
"clinical",
"clinics",
"clinton",
"clip",
"clips",
"clock",
"clocks",
"clone",
"close",
"closed",
"closely",
"closer",
"closes",
"closest",
"closing",
"closure",
"cloth",
"clothes",
"clothing",
"cloud",
"clouds",
"cloudy",
"club",
"clubs",
"cluster",
"clusters",
"cms",
"cnet",
"cnn",
"coach",
"coaches",
"coaching",
"coal",
"coalition",
"coast",
"coastal",
"coat",
"coated",
"coating",
"cocktail",
"cod",
"code",
"codes",
"coding",
"coffee",
"cognitive",
"cohen",
"coin",
"coins",
"col",
"cold",
"cole",
"coleman",
"colin",
"collaboration",
"collaborative",
"collapse",
"collar",
"colleague",
"colleagues",
"collect",
"collectables",
"collected",
"collectible",
"collectibles",
"collecting",
"collection",
"collections",
"collective",
"collector",
"collectors",
"college",
"colleges",
"collins",
"cologne",
"colombia",
"colon",
"colonial",
"colony",
"color",
"colorado",
"colored",
"colors",
"columbia",
"columbus",
"column",
"columnists",
"columns",
"com",
"combat",
"combination",
"combinations",
"combine",
"combined",
"combines",
"combining",
"combo",
"come",
"comedy",
"comes",
"comfort",
"comfortable",
"comic",
"comics",
"coming",
"comm",
"command",
"commander",
"commands",
"comment",
"commentary",
"commented",
"comments",
"commerce",
"commercial",
"commission",
"commissioner",
"commissioners",
"commissions",
"commit",
"commitment",
"commitments",
"committed",
"committee",
"committees",
"commodities",
"commodity",
"common",
"commonly",
"commons",
"commonwealth",
"communicate",
"communication",
"communications",
"communist",
"communities",
"community",
"comp",
"compact",
"companies",
"companion",
"company",
"compaq",
"comparable",
"comparative",
"compare",
"compared",
"comparing",
"comparison",
"comparisons",
"compatibility",
"compatible",
"compensation",
"compete",
"competent",
"competing",
"competition",
"competitions",
"competitive",
"competitors",
"compilation",
"compile",
"compiled",
"compiler",
"complaint",
"complaints",
"complement",
"complete",
"completed",
"completely",
"completing",
"completion",
"complex",
"complexity",
"compliance",
"compliant",
"complicated",
"complications",
"complimentary",
"comply",
"component",
"components",
"composed",
"composer",
"composite",
"composition",
"compound",
"compounds",
"comprehensive",
"compressed",
"compression",
"compromise",
"computation",
"computational",
"compute",
"computed",
"computer",
"computers",
"computing",
"con",
"concentrate",
"concentration",
"concentrations",
"concept",
"concepts",
"conceptual",
"concern",
"concerned",
"concerning",
"concerns",
"concert",
"concerts",
"conclude",
"concluded",
"conclusion",
"conclusions",
"concord",
"concrete",
"condition",
"conditional",
"conditioning",
"conditions",
"condo",
"condos",
"conduct",
"conducted",
"conducting",
"conf",
"conference",
"conferences",
"conferencing",
"confidence",
"confident",
"confidential",
"confidentiality",
"config",
"configuration",
"configurations",
"configure",
"configured",
"configuring",
"confirm",
"confirmation",
"confirmed",
"conflict",
"conflicts",
"confused",
"confusion",
"congo",
"congratulations",
"congress",
"congressional",
"conjunction",
"connect",
"connected",
"connecticut",
"connecting",
"connection",
"connections",
"connectivity",
"connector",
"connectors",
"cons",
"conscious",
"consciousness",
"consecutive",
"consensus",
"consent",
"consequence",
"consequences",
"consequently",
"conservation",
"conservative",
"consider",
"considerable",
"consideration",
"considerations",
"considered",
"considering",
"considers",
"consist",
"consistency",
"consistent",
"consistently",
"consisting",
"consists",
"console",
"consoles",
"consolidated",
"consolidation",
"consortium",
"conspiracy",
"const",
"constant",
"constantly",
"constitute",
"constitutes",
"constitution",
"constitutional",
"constraint",
"constraints",
"construct",
"constructed",
"construction",
"consult",
"consultancy",
"consultant",
"consultants",
"consultation",
"consulting",
"consumer",
"consumers",
"consumption",
"contact",
"contacted",
"contacting",
"contacts",
"contain",
"contained",
"container",
"containers",
"containing",
"contains",
"contamination",
"contemporary",
"content",
"contents",
"contest",
"contests",
"context",
"continent",
"continental",
"continually",
"continue",
"continued",
"continues",
"continuing",
"continuity",
"continuous",
"continuously",
"contract",
"contracting",
"contractor",
"contractors",
"contracts",
"contrary",
"contrast",
"contribute",
"contributed",
"contributing",
"contribution",
"contributions",
"contributor",
"contributors",
"control",
"controlled",
"controller",
"controllers",
"controlling",
"controls",
"controversial",
"controversy",
"convenience",
"convenient",
"convention",
"conventional",
"conventions",
"convergence",
"conversation",
"conversations",
"conversion",
"convert",
"converted",
"converter",
"convertible",
"convicted",
"conviction",
"convinced",
"cook",
"cookbook",
"cooked",
"cookie",
"cookies",
"cooking",
"cool",
"cooler",
"cooling",
"cooper",
"cooperation",
"cooperative",
"coordinate",
"coordinated",
"coordinates",
"coordination",
"coordinator",
"cop",
"cope",
"copied",
"copies",
"copper",
"copy",
"copying",
"copyright",
"copyrighted",
"copyrights",
"coral",
"cord",
"cordless",
"core",
"cork",
"corn",
"cornell",
"corner",
"corners",
"cornwall",
"corp",
"corporate",
"corporation",
"corporations",
"corps",
"corpus",
"correct",
"corrected",
"correction",
"corrections",
"correctly",
"correlation",
"correspondence",
"corresponding",
"corruption",
"cos",
"cosmetic",
"cosmetics",
"cost",
"costa",
"costs",
"costume",
"costumes",
"cottage",
"cottages",
"cotton",
"could",
"council",
"councils",
"counsel",
"counseling",
"count",
"counted",
"counter",
"counters",
"counties",
"counting",
"countries",
"country",
"counts",
"county",
"couple",
"coupled",
"couples",
"coupon",
"coupons",
"courage",
"courier",
"course",
"courses",
"court",
"courtesy",
"courts",
"cove",
"cover",
"coverage",
"covered",
"covering",
"covers",
"cow",
"cowboy",
"cpu",
"crack",
"cradle",
"craft",
"crafts",
"craig",
"craps",
"crash",
"crawford",
"crazy",
"cream",
"create",
"created",
"creates",
"creating",
"creation",
"creations",
"creative",
"creativity",
"creator",
"creature",
"creatures",
"credit",
"credits",
"creek",
"crest",
"crew",
"cricket",
"crime",
"crimes",
"criminal",
"crisis",
"criteria",
"criterion",
"critical",
"criticism",
"critics",
"crm",
"croatia",
"crop",
"crops",
"cross",
"crossing",
"crossword",
"crowd",
"crown",
"crucial",
"crude",
"cruise",
"cruises",
"cruz",
"cry",
"crystal",
"css",
"cst",
"ctrl",
"cuba",
"cube",
"cubic",
"cuisine",
"cult",
"cultural",
"culture",
"cultures",
"cumulative",
"cup",
"cups",
"cure",
"curious",
"currencies",
"currency",
"current",
"currently",
"curriculum",
"cursor",
"curtis",
"curve",
"curves",
"custody",
"custom",
"customer",
"customers",
"customize",
"customized",
"customs",
"cut",
"cute",
"cuts",
"cutting",
"cvs",
"cyber",
"cycle",
"cycles",
"cycling",
"cylinder",
"cyprus",
"czech",
"dad",
"daddy",
"daily",
"dairy",
"daisy",
"dakota",
"dale",
"dallas",
"dam",
"damage",
"damaged",
"damages",
"dame",
"dan",
"dana",
"dance",
"dancing",
"danger",
"dangerous",
"daniel",
"danish",
"danny",
"dans",
"dare",
"dark",
"darkness",
"darwin",
"das",
"dash",
"dat",
"data",
"database",
"databases",
"date",
"dated",
"dates",
"dating",
"daughter",
"daughters",
"dave",
"david",
"davidson",
"davis",
"dawn",
"day",
"days",
"dayton",
"ddr",
"dead",
"deadline",
"deadly",
"deaf",
"deal",
"dealer",
"dealers",
"dealing",
"deals",
"dealt",
"dealtime",
"dean",
"dear",
"death",
"deaths",
"debate",
"debian",
"deborah",
"debt",
"debug",
"debut",
"dec",
"decade",
"decades",
"december",
"decent",
"decide",
"decided",
"decimal",
"decision",
"decisions",
"deck",
"declaration",
"declare",
"declared",
"decline",
"declined",
"decor",
"decorating",
"decorative",
"decrease",
"decreased",
"dedicated",
"dee",
"deemed",
"deep",
"deeper",
"deeply",
"deer",
"def",
"default",
"defeat",
"defects",
"defence",
"defend",
"defendant",
"defense",
"defensive",
"deferred",
"deficit",
"define",
"defined",
"defines",
"defining",
"definitely",
"definition",
"definitions",
"degree",
"degrees",
"del",
"delaware",
"delay",
"delayed",
"delays",
"delegation",
"delete",
"deleted",
"delhi",
"delicious",
"delight",
"deliver",
"delivered",
"delivering",
"delivers",
"delivery",
"dell",
"delta",
"deluxe",
"dem",
"demand",
"demanding",
"demands",
"demo",
"democracy",
"democrat",
"democratic",
"democrats",
"demographic",
"demonstrate",
"demonstrated",
"demonstrates",
"demonstration",
"den",
"denial",
"denied",
"denmark",
"dennis",
"dense",
"density",
"dental",
"dentists",
"denver",
"deny",
"department",
"departmental",
"departments",
"departure",
"depend",
"dependence",
"dependent",
"depending",
"depends",
"deployment",
"deposit",
"deposits",
"depot",
"depression",
"dept",
"depth",
"deputy",
"der",
"derby",
"derek",
"derived",
"des",
"descending",
"describe",
"described",
"describes",
"describing",
"description",
"descriptions",
"desert",
"deserve",
"design",
"designated",
"designation",
"designed",
"designer",
"designers",
"designing",
"designs",
"desirable",
"desire",
"desired",
"desk",
"desktop",
"desktops",
"desperate",
"despite",
"destination",
"destinations",
"destiny",
"destroy",
"destroyed",
"destruction",
"detail",
"detailed",
"details",
"detect",
"detected",
"detection",
"detective",
"detector",
"determination",
"determine",
"determined",
"determines",
"determining",
"detroit",
"deutsch",
"deutsche",
"deutschland",
"dev",
"devel",
"develop",
"developed",
"developer",
"developers",
"developing",
"development",
"developmental",
"developments",
"develops",
"deviant",
"deviation",
"device",
"devices",
"devil",
"devon",
"devoted",
"diabetes",
"diagnosis",
"diagnostic",
"diagram",
"dial",
"dialog",
"dialogue",
"diameter",
"diamond",
"diamonds",
"diana",
"diane",
"diary",
"dice",
"dicke",
"dictionaries",
"dictionary",
"did",
"die",
"died",
"diego",
"dies",
"diesel",
"diet",
"dietary",
"diff",
"differ",
"difference",
"differences",
"different",
"differential",
"differently",
"difficult",
"difficulties",
"difficulty",
"diffs",
"dig",
"digest",
"digit",
"digital",
"dim",
"dimension",
"dimensional",
"dimensions",
"dining",
"dinner",
"dip",
"diploma",
"dir",
"direct",
"directed",
"direction",
"directions",
"directive",
"directly",
"director",
"directories",
"directors",
"directory",
"dirt",
"dirty",
"dis",
"disabilities",
"disability",
"disable",
"disabled",
"disagree",
"disappointed",
"disaster",
"disc",
"discharge",
"disciplinary",
"discipline",
"disciplines",
"disclaimer",
"disclaimers",
"disclose",
"disclosure",
"disco",
"discount",
"discounted",
"discounts",
"discover",
"discovered",
"discovery",
"discrete",
"discretion",
"discrimination",
"discs",
"discuss",
"discussed",
"discusses",
"discussing",
"discussion",
"discussions",
"disease",
"diseases",
"dish",
"dishes",
"disk",
"disks",
"disney",
"disorder",
"disorders",
"dispatch",
"dispatched",
"display",
"displayed",
"displaying",
"displays",
"disposal",
"disposition",
"dispute",
"disputes",
"dist",
"distance",
"distances",
"distant",
"distinct",
"distinction",
"distinguished",
"distribute",
"distributed",
"distribution",
"distributions",
"distributor",
"distributors",
"district",
"districts",
"disturbed",
"div",
"dive",
"diverse",
"diversity",
"divide",
"divided",
"dividend",
"divine",
"diving",
"division",
"divisions",
"divorce",
"divx",
"diy",
"dna",
"dns",
"doc",
"dock",
"docs",
"doctor",
"doctors",
"doctrine",
"document",
"documentary",
"documentation",
"documented",
"documents",
"dod",
"dodge",
"doe",
"does",
"dog",
"dogs",
"doing",
"doll",
"dollar",
"dollars",
"dolls",
"dom",
"domain",
"domains",
"dome",
"domestic",
"dominant",
"dominican",
"don",
"donald",
"donate",
"donated",
"donation",
"donations",
"done",
"donna",
"donor",
"donors",
"dont",
"doom",
"door",
"doors",
"dos",
"dosage",
"dose",
"dot",
"double",
"doubt",
"doug",
"douglas",
"dover",
"dow",
"down",
"download",
"downloadable",
"downloaded",
"downloading",
"downloads",
"downtown",
"dozen",
"dozens",
"dpi",
"draft",
"drag",
"dragon",
"drain",
"drainage",
"drama",
"dramatic",
"dramatically",
"draw",
"drawing",
"drawings",
"drawn",
"draws",
"dream",
"dreams",
"dress",
"dressed",
"dresses",
"dressing",
"drew",
"dried",
"drill",
"drilling",
"drink",
"drinking",
"drinks",
"drive",
"driven",
"driver",
"drivers",
"drives",
"driving",
"drop",
"dropped",
"drops",
"drove",
"drug",
"drugs",
"drum",
"drums",
"drunk",
"dry",
"dryer",
"dsc",
"dsl",
"dts",
"dual",
"dubai",
"dublin",
"duck",
"dude",
"due",
"dui",
"duke",
"dumb",
"dump",
"duncan",
"duo",
"duplicate",
"durable",
"duration",
"durham",
"during",
"dust",
"dutch",
"duties",
"duty",
"dvd",
"dvds",
"dying",
"dylan",
"dynamic",
"dynamics",
"each",
"eagle",
"eagles",
"ear",
"earl",
"earlier",
"earliest",
"early",
"earn",
"earned",
"earning",
"earnings",
"earrings",
"ears",
"earth",
"earthquake",
"ease",
"easier",
"easily",
"east",
"easter",
"eastern",
"easy",
"eat",
"eating",
"eau",
"ebay",
"ebony",
"ebook",
"ebooks",
"echo",
"eclipse",
"eco",
"ecological",
"ecology",
"ecommerce",
"economic",
"economics",
"economies",
"economy",
"ecuador",
"eddie",
"eden",
"edgar",
"edge",
"edges",
"edinburgh",
"edit",
"edited",
"editing",
"edition",
"editions",
"editor",
"editorial",
"editorials",
"editors",
"edmonton",
"eds",
"edt",
"educated",
"education",
"educational",
"educators",
"edward",
"edwards",
"effect",
"effective",
"effectively",
"effectiveness",
"effects",
"efficiency",
"efficient",
"efficiently",
"effort",
"efforts",
"egg",
"eggs",
"egypt",
"egyptian",
"eight",
"either",
"elder",
"elderly",
"elect",
"elected",
"election",
"elections",
"electoral",
"electric",
"electrical",
"electricity",
"electro",
"electron",
"electronic",
"electronics",
"elegant",
"element",
"elementary",
"elements",
"elephant",
"elevation",
"eleven",
"eligibility",
"eligible",
"eliminate",
"elimination",
"elite",
"elizabeth",
"ellen",
"elliott",
"ellis",
"else",
"elsewhere",
"elvis",
"emacs",
"email",
"emails",
"embassy",
"embedded",
"emerald",
"emergency",
"emerging",
"emily",
"eminem",
"emirates",
"emission",
"emissions",
"emma",
"emotional",
"emotions",
"emperor",
"emphasis",
"empire",
"empirical",
"employ",
"employed",
"employee",
"employees",
"employer",
"employers",
"employment",
"empty",
"enable",
"enabled",
"enables",
"enabling",
"enb",
"enclosed",
"enclosure",
"encoding",
"encounter",
"encountered",
"encourage",
"encouraged",
"encourages",
"encouraging",
"encryption",
"encyclopedia",
"end",
"endangered",
"ended",
"endif",
"ending",
"endless",
"endorsed",
"endorsement",
"ends",
"enemies",
"enemy",
"energy",
"enforcement",
"eng",
"engage",
"engaged",
"engagement",
"engaging",
"engine",
"engineer",
"engineering",
"engineers",
"engines",
"england",
"english",
"enhance",
"enhanced",
"enhancement",
"enhancements",
"enhancing",
"enjoy",
"enjoyed",
"enjoying",
"enlarge",
"enlargement",
"enormous",
"enough",
"enquiries",
"enquiry",
"enrolled",
"enrollment",
"ensemble",
"ensure",
"ensures",
"ensuring",
"ent",
"enter",
"entered",
"entering",
"enterprise",
"enterprises",
"enters",
"entertaining",
"entertainment",
"entire",
"entirely",
"entities",
"entitled",
"entity",
"entrance",
"entrepreneur",
"entrepreneurs",
"entries",
"entry",
"envelope",
"environment",
"environmental",
"environments",
"enzyme",
"eos",
"epa",
"epic",
"epinions",
"episode",
"episodes",
"epson",
"equal",
"equality",
"equally",
"equation",
"equations",
"equilibrium",
"equipment",
"equipped",
"equity",
"equivalent",
"era",
"eric",
"ericsson",
"erik",
"erotica",
"erp",
"error",
"errors",
"escape",
"escorts",
"especially",
"espn",
"essay",
"essays",
"essence",
"essential",
"essentially",
"essentials",
"essex",
"est",
"establish",
"established",
"establishing",
"establishment",
"estate",
"estates",
"estimate",
"estimated",
"estimates",
"estimation",
"estonia",
"etc",
"eternal",
"ethernet",
"ethical",
"ethics",
"ethiopia",
"ethnic",
"eugene",
"eur",
"euro",
"europe",
"european",
"euros",
"eva",
"eval",
"evaluate",
"evaluated",
"evaluating",
"evaluation",
"evaluations",
"evanescence",
"evans",
"eve",
"even",
"evening",
"event",
"events",
"eventually",
"ever",
"every",
"everybody",
"everyday",
"everyone",
"everything",
"everywhere",
"evidence",
"evident",
"evil",
"evolution",
"exact",
"exactly",
"exam",
"examination",
"examinations",
"examine",
"examined",
"examines",
"examining",
"example",
"examples",
"exams",
"exceed",
"excel",
"excellence",
"excellent",
"except",
"exception",
"exceptional",
"exceptions",
"excerpt",
"excess",
"excessive",
"exchange",
"exchanges",
"excited",
"excitement",
"exciting",
"exclude",
"excluded",
"excluding",
"exclusion",
"exclusive",
"exclusively",
"excuse",
"exec",
"execute",
"executed",
"execution",
"executive",
"executives",
"exempt",
"exemption",
"exercise",
"exercises",
"exhaust",
"exhibit",
"exhibition",
"exhibitions",
"exhibits",
"exist",
"existed",
"existence",
"existing",
"exists",
"exit",
"exotic",
"exp",
"expand",
"expanded",
"expanding",
"expansion",
"expansys",
"expect",
"expectations",
"expected",
"expects",
"expedia",
"expenditure",
"expenditures",
"expense",
"expenses",
"expensive",
"experience",
"experienced",
"experiences",
"experiencing",
"experiment",
"experimental",
"experiments",
"expert",
"expertise",
"experts",
"expiration",
"expired",
"expires",
"explain",
"explained",
"explaining",
"explains",
"explanation",
"explicit",
"explicitly",
"exploration",
"explore",
"explorer",
"exploring",
"explosion",
"expo",
"export",
"exports",
"exposed",
"exposure",
"express",
"expressed",
"expression",
"expressions",
"ext",
"extend",
"extended",
"extending",
"extends",
"extension",
"extensions",
"extensive",
"extent",
"exterior",
"external",
"extra",
"extract",
"extraction",
"extraordinary",
"extras",
"extreme",
"extremely",
"eye",
"eyed",
"eyes",
"fabric",
"fabrics",
"fabulous",
"face",
"faced",
"faces",
"facial",
"facilitate",
"facilities",
"facility",
"facing",
"fact",
"factor",
"factors",
"factory",
"facts",
"faculty",
"fail",
"failed",
"failing",
"fails",
"failure",
"failures",
"fair",
"fairfield",
"fairly",
"fairy",
"faith",
"fake",
"fall",
"fallen",
"falling",
"falls",
"false",
"fame",
"familiar",
"families",
"family",
"famous",
"fan",
"fancy",
"fans",
"fantastic",
"fantasy",
"faq",
"faqs",
"far",
"fare",
"fares",
"farm",
"farmer",
"farmers",
"farming",
"farms",
"fascinating",
"fashion",
"fast",
"faster",
"fastest",
"fat",
"fatal",
"fate",
"father",
"fathers",
"fatty",
"fault",
"favor",
"favorite",
"favorites",
"favors",
"fax",
"fbi",
"fcc",
"fda",
"fear",
"fears",
"feat",
"feature",
"featured",
"features",
"featuring",
"feb",
"february",
"fed",
"federal",
"federation",
"fee",
"feed",
"feedback",
"feeding",
"feeds",
"feel",
"feeling",
"feelings",
"feels",
"fees",
"feet",
"fell",
"fellow",
"fellowship",
"felt",
"female",
"females",
"fence",
"feof",
"ferrari",
"ferry",
"festival",
"festivals",
"fetish",
"fever",
"few",
"fewer",
"fiber",
"fibre",
"fiction",
"field",
"fields",
"fifteen",
"fifth",
"fifty",
"fig",
"fight",
"fighter",
"fighters",
"fighting",
"figure",
"figured",
"figures",
"fiji",
"file",
"filed",
"filename",
"files",
"filing",
"fill",
"filled",
"filling",
"film",
"filme",
"films",
"filter",
"filtering",
"filters",
"fin",
"final",
"finally",
"finals",
"finance",
"finances",
"financial",
"financing",
"find",
"findarticles",
"finder",
"finding",
"findings",
"findlaw",
"finds",
"fine",
"finest",
"finger",
"fingers",
"finish",
"finished",
"finishing",
"finite",
"finland",
"finnish",
"fioricet",
"fire",
"fired",
"firefox",
"fireplace",
"fires",
"firewall",
"firewire",
"firm",
"firms",
"firmware",
"first",
"fiscal",
"fish",
"fisher",
"fisheries",
"fishing",
"fist",
"fit",
"fitness",
"fits",
"fitted",
"fitting",
"five",
"fix",
"fixed",
"fixes",
"fixtures",
"flag",
"flags",
"flame",
"flash",
"flashers",
"flashing",
"flat",
"flavor",
"fleece",
"fleet",
"flesh",
"flex",
"flexibility",
"flexible",
"flickr",
"flight",
"flights",
"flip",
"float",
"floating",
"flood",
"floor",
"flooring",
"floors",
"floppy",
"floral",
"florence",
"florida",
"florist",
"florists",
"flour",
"flow",
"flower",
"flowers",
"flows",
"floyd",
"flu",
"fluid",
"flush",
"flux",
"fly",
"flyer",
"flying",
"foam",
"focal",
"focus",
"focused",
"focuses",
"focusing",
"fog",
"fold",
"folder",
"folders",
"folding",
"folk",
"folks",
"follow",
"followed",
"following",
"follows",
"font",
"fonts",
"foo",
"food",
"foods",
"fool",
"foot",
"footage",
"football",
"footwear",
"for",
"forbes",
"forbidden",
"force",
"forced",
"forces",
"ford",
"forecast",
"forecasts",
"foreign",
"forest",
"forestry",
"forests",
"forever",
"forge",
"forget",
"forgot",
"forgotten",
"fork",
"form",
"formal",
"format",
"formation",
"formats",
"formatting",
"formed",
"former",
"formerly",
"forming",
"forms",
"formula",
"fort",
"forth",
"fortune",
"forty",
"forum",
"forums",
"forward",
"forwarding",
"fossil",
"foster",
"foto",
"fotos",
"fought",
"foul",
"found",
"foundation",
"foundations",
"founded",
"founder",
"fountain",
"four",
"fourth",
"fox",
"fraction",
"fragrance",
"fragrances",
"frame",
"framed",
"frames",
"framework",
"framing",
"france",
"franchise",
"francis",
"francisco",
"frank",
"frankfurt",
"franklin",
"fraser",
"fraud",
"fred",
"frederick",
"free",
"freebsd",
"freedom",
"freelance",
"freely",
"freeware",
"freeze",
"freight",
"french",
"frequencies",
"frequency",
"frequent",
"frequently",
"fresh",
"fri",
"friday",
"fridge",
"friend",
"friendly",
"friends",
"friendship",
"frog",
"from",
"front",
"frontier",
"frontpage",
"frost",
"frozen",
"fruit",
"fruits",
"ftp",
"fuel",
"fuji",
"fujitsu",
"full",
"fully",
"fun",
"function",
"functional",
"functionality",
"functioning",
"functions",
"fund",
"fundamental",
"fundamentals",
"funded",
"funding",
"fundraising",
"funds",
"funeral",
"funk",
"funky",
"funny",
"fur",
"furnished",
"furnishings",
"furniture",
"further",
"furthermore",
"fusion",
"future",
"futures",
"fuzzy",
"fwd",
"gabriel",
"gadgets",
"gage",
"gain",
"gained",
"gains",
"galaxy",
"gale",
"galleries",
"gallery",
"gambling",
"game",
"gamecube",
"games",
"gamespot",
"gaming",
"gamma",
"gang",
"gap",
"gaps",
"garage",
"garbage",
"garcia",
"garden",
"gardening",
"gardens",
"garlic",
"garmin",
"gary",
"gas",
"gasoline",
"gate",
"gates",
"gateway",
"gather",
"gathered",
"gathering",
"gauge",
"gave",
"gay",
"gays",
"gazette",
"gba",
"gbp",
"gcc",
"gdp",
"gear",
"geek",
"gel",
"gem",
"gen",
"gender",
"gene",
"genealogy",
"general",
"generally",
"generate",
"generated",
"generates",
"generating",
"generation",
"generations",
"generator",
"generators",
"generic",
"generous",
"genes",
"genesis",
"genetic",
"genetics",
"geneva",
"genius",
"genome",
"genre",
"genres",
"gentle",
"gentleman",
"gently",
"genuine",
"geo",
"geographic",
"geographical",
"geography",
"geological",
"geology",
"geometry",
"george",
"georgia",
"gerald",
"german",
"germany",
"get",
"gets",
"getting",
"ghana",
"ghost",
"ghz",
"giant",
"giants",
"gibraltar",
"gibson",
"gif",
"gift",
"gifts",
"gig",
"gilbert",
"girl",
"girlfriend",
"girls",
"gis",
"give",
"given",
"gives",
"giving",
"glad",
"glance",
"glasgow",
"glass",
"glasses",
"glen",
"glenn",
"global",
"globe",
"glory",
"glossary",
"gloves",
"glow",
"glucose",
"gmbh",
"gmc",
"gmt",
"gnome",
"gnu",
"goal",
"goals",
"goat",
"gods",
"goes",
"going",
"gold",
"golden",
"golf",
"gone",
"gonna",
"good",
"goods",
"google",
"gordon",
"gore",
"gorgeous",
"gospel",
"gossip",
"got",
"gothic",
"goto",
"gotta",
"gotten",
"gourmet",
"governance",
"governing",
"government",
"governmental",
"governments",
"governor",
"gpl",
"gps",
"grab",
"grace",
"grad",
"grade",
"grades",
"gradually",
"graduate",
"graduated",
"graduates",
"graduation",
"graham",
"grain",
"grammar",
"grams",
"grand",
"grande",
"granny",
"grant",
"granted",
"grants",
"graph",
"graphic",
"graphical",
"graphics",
"graphs",
"gras",
"grass",
"grateful",
"gratis",
"gratuit",
"grave",
"gravity",
"gray",
"great",
"greater",
"greatest",
"greatly",
"greece",
"greek",
"green",
"greene",
"greenhouse",
"greensboro",
"greeting",
"greetings",
"greg",
"gregory",
"grenada",
"grew",
"grey",
"grid",
"griffin",
"grill",
"grip",
"grocery",
"groove",
"gross",
"ground",
"grounds",
"groundwater",
"group",
"groups",
"grove",
"grow",
"growing",
"grown",
"grows",
"growth",
"gsm",
"gst",
"gtk",
"guam",
"guarantee",
"guaranteed",
"guarantees",
"guard",
"guardian",
"guards",
"guatemala",
"guess",
"guest",
"guestbook",
"guests",
"gui",
"guidance",
"guide",
"guided",
"guidelines",
"guides",
"guild",
"guilty",
"guinea",
"guitar",
"guitars",
"gulf",
"gun",
"guns",
"guru",
"guy",
"guyana",
"guys",
"gym",
"gzip",
"habitat",
"habits",
"hack",
"hacker",
"had",
"hair",
"hairy",
"haiti",
"half",
"halifax",
"hall",
"halloween",
"halo",
"ham",
"hamburg",
"hamilton",
"hammer",
"hampshire",
"hampton",
"hand",
"handbags",
"handbook",
"handed",
"handheld",
"handhelds",
"handle",
"handled",
"handles",
"handling",
"handmade",
"hands",
"handy",
"hang",
"hanging",
"hans",
"hansen",
"happen",
"happened",
"happening",
"happens",
"happiness",
"happy",
"harassment",
"harbor",
"hard",
"hardcover",
"harder",
"hardly",
"hardware",
"hardwood",
"harley",
"harm",
"harmful",
"harmony",
"harold",
"harper",
"harris",
"harrison",
"harry",
"hart",
"hartford",
"harvard",
"harvest",
"harvey",
"has",
"hash",
"hat",
"hate",
"hats",
"have",
"haven",
"having",
"hawaii",
"hawaiian",
"hawk",
"hay",
"hayes",
"hazard",
"hazardous",
"hazards",
"hdtv",
"head",
"headed",
"header",
"headers",
"heading",
"headline",
"headlines",
"headphones",
"headquarters",
"heads",
"headset",
"healing",
"health",
"healthcare",
"healthy",
"hear",
"heard",
"hearing",
"hearings",
"heart",
"hearts",
"heat",
"heated",
"heater",
"heath",
"heather",
"heating",
"heaven",
"heavily",
"heavy",
"hebrew",
"heel",
"height",
"heights",
"held",
"helen",
"helena",
"helicopter",
"hello",
"helmet",
"help",
"helped",
"helpful",
"helping",
"helps",
"hence",
"henderson",
"henry",
"hepatitis",
"her",
"herald",
"herb",
"herbal",
"herbs",
"here",
"hereby",
"herein",
"heritage",
"hero",
"heroes",
"herself",
"hewlett",
"hey",
"hidden",
"hide",
"hierarchy",
"high",
"higher",
"highest",
"highland",
"highlight",
"highlighted",
"highlights",
"highly",
"highs",
"highway",
"highways",
"hiking",
"hill",
"hills",
"hilton",
"him",
"himself",
"hindu",
"hint",
"hints",
"hip",
"hire",
"hired",
"hiring",
"his",
"hispanic",
"hist",
"historic",
"historical",
"history",
"hit",
"hitachi",
"hits",
"hitting",
"hiv",
"hobbies",
"hobby",
"hockey",
"hold",
"holdem",
"holder",
"holders",
"holding",
"holdings",
"holds",
"hole",
"holes",
"holiday",
"holidays",
"holland",
"hollow",
"holly",
"hollywood",
"holmes",
"holocaust",
"holy",
"home",
"homeland",
"homeless",
"homepage",
"homes",
"hometown",
"homework",
"hon",
"honda",
"honduras",
"honest",
"honey",
"hong",
"honolulu",
"honor",
"honors",
"hood",
"hook",
"hop",
"hope",
"hoped",
"hopefully",
"hopes",
"hoping",
"hopkins",
"horizon",
"horizontal",
"hormone",
"horn",
"horrible",
"horror",
"horse",
"horses",
"hose",
"hospital",
"hospitality",
"hospitals",
"host",
"hosted",
"hostel",
"hostels",
"hosting",
"hosts",
"hot",
"hotel",
"hotels",
"hotmail",
"hottest",
"hour",
"hourly",
"hours",
"house",
"household",
"households",
"houses",
"housewares",
"housewives",
"housing",
"houston",
"how",
"howard",
"however",
"howto",
"href",
"hrs",
"html",
"http",
"hub",
"hudson",
"huge",
"hugh",
"hughes",
"hugo",
"hull",
"human",
"humanitarian",
"humanities",
"humanity",
"humans",
"humidity",
"humor",
"hundred",
"hundreds",
"hung",
"hungarian",
"hungary",
"hunger",
"hungry",
"hunt",
"hunter",
"hunting",
"huntington",
"hurricane",
"hurt",
"husband",
"hwy",
"hybrid",
"hydraulic",
"hydrocodone",
"hydrogen",
"hygiene",
"hypothesis",
"hypothetical",
"hyundai",
"ian",
"ibm",
"ice",
"iceland",
"icon",
"icons",
"icq",
"ict",
"idaho",
"ide",
"idea",
"ideal",
"ideas",
"identical",
"identification",
"identified",
"identifier",
"identifies",
"identify",
"identifying",
"identity",
"idle",
"idol",
"ids",
"ieee",
"ignore",
"ignored",
"iii",
"ill",
"illegal",
"illinois",
"illness",
"illustrated",
"illustration",
"illustrations",
"image",
"images",
"imagination",
"imagine",
"imaging",
"img",
"immediate",
"immediately",
"immigrants",
"immigration",
"immune",
"immunology",
"impact",
"impacts",
"impaired",
"imperial",
"implement",
"implementation",
"implemented",
"implementing",
"implications",
"implied",
"implies",
"import",
"importance",
"important",
"importantly",
"imported",
"imports",
"impose",
"imposed",
"impossible",
"impressed",
"impression",
"impressive",
"improve",
"improved",
"improvement",
"improvements",
"improving",
"inappropriate",
"inbox",
"inc",
"incentive",
"incentives",
"inch",
"inches",
"incidence",
"incident",
"incidents",
"incl",
"include",
"included",
"includes",
"including",
"inclusion",
"inclusive",
"income",
"incoming",
"incomplete",
"incorporate",
"incorporated",
"incorrect",
"increase",
"increased",
"increases",
"increasing",
"increasingly",
"incredible",
"incurred",
"ind",
"indeed",
"independence",
"independent",
"independently",
"index",
"indexed",
"indexes",
"india",
"indian",
"indiana",
"indianapolis",
"indians",
"indicate",
"indicated",
"indicates",
"indicating",
"indication",
"indicator",
"indicators",
"indices",
"indie",
"indigenous",
"indirect",
"individual",
"individually",
"individuals",
"indonesia",
"indonesian",
"indoor",
"induced",
"induction",
"industrial",
"industries",
"industry",
"inexpensive",
"inf",
"infant",
"infants",
"infected",
"infection",
"infections",
"infectious",
"infinite",
"inflation",
"influence",
"influenced",
"influences",
"info",
"inform",
"informal",
"information",
"informational",
"informative",
"informed",
"infrared",
"infrastructure",
"infringement",
"ing",
"ingredients",
"inherited",
"initial",
"initially",
"initiated",
"initiative",
"initiatives",
"injection",
"injured",
"injuries",
"injury",
"ink",
"inkjet",
"inline",
"inn",
"inner",
"innocent",
"innovation",
"innovations",
"innovative",
"inns",
"input",
"inputs",
"inquire",
"inquiries",
"inquiry",
"ins",
"insects",
"insert",
"inserted",
"insertion",
"inside",
"insider",
"insight",
"insights",
"inspection",
"inspections",
"inspector",
"inspiration",
"inspired",
"install",
"installation",
"installations",
"installed",
"installing",
"instance",
"instances",
"instant",
"instantly",
"instead",
"institute",
"institutes",
"institution",
"institutional",
"institutions",
"instruction",
"instructional",
"instructions",
"instructor",
"instructors",
"instrument",
"instrumental",
"instrumentation",
"instruments",
"insulation",
"insulin",
"insurance",
"insured",
"int",
"intake",
"integer",
"integral",
"integrate",
"integrated",
"integrating",
"integration",
"integrity",
"intel",
"intellectual",
"intelligence",
"intelligent",
"intend",
"intended",
"intense",
"intensity",
"intensive",
"intent",
"intention",
"inter",
"interact",
"interaction",
"interactions",
"interactive",
"interest",
"interested",
"interesting",
"interests",
"interface",
"interfaces",
"interference",
"interim",
"interior",
"intermediate",
"internal",
"international",
"internationally",
"internet",
"internship",
"interpretation",
"interpreted",
"interracial",
"intersection",
"interstate",
"interval",
"intervals",
"intervention",
"interventions",
"interview",
"interviews",
"intimate",
"intl",
"into",
"intranet",
"intro",
"introduce",
"introduced",
"introduces",
"introducing",
"introduction",
"introductory",
"invalid",
"invasion",
"invention",
"inventory",
"invest",
"investigate",
"investigated",
"investigation",
"investigations",
"investigator",
"investigators",
"investing",
"investment",
"investments",
"investor",
"investors",
"invisible",
"invision",
"invitation",
"invitations",
"invite",
"invited",
"invoice",
"involve",
"involved",
"involvement",
"involves",
"involving",
"ion",
"iowa",
"ipaq",
"ipod",
"ips",
"ira",
"iran",
"iraq",
"iraqi",
"irc",
"ireland",
"irish",
"iron",
"irrigation",
"irs",
"isa",
"isaac",
"isbn",
"islam",
"islamic",
"island",
"islands",
"isle",
"iso",
"isolated",
"isolation",
"isp",
"israel",
"israeli",
"issn",
"issue",
"issued",
"issues",
"ist",
"istanbul",
"italia",
"italian",
"italiano",
"italic",
"italy",
"item",
"items",
"its",
"itself",
"itunes",
"ivory",
"jack",
"jacket",
"jackets",
"jackie",
"jackson",
"jacksonville",
"jacob",
"jade",
"jaguar",
"jail",
"jake",
"jam",
"jamaica",
"james",
"jamie",
"jan",
"jane",
"janet",
"january",
"japan",
"japanese",
"jar",
"jason",
"java",
"javascript",
"jay",
"jazz",
"jean",
"jeans",
"jeep",
"jeff",
"jefferson",
"jeffrey",
"jelsoft",
"jennifer",
"jenny",
"jeremy",
"jerry",
"jersey",
"jerusalem",
"jesse",
"jessica",
"jesus",
"jet",
"jets",
"jewel",
"jewellery",
"jewelry",
"jewish",
"jews",
"jill",
"jim",
"jimmy",
"joan",
"job",
"jobs",
"joe",
"joel",
"john",
"johnny",
"johns",
"johnson",
"johnston",
"join",
"joined",
"joining",
"joins",
"joint",
"joke",
"jokes",
"jon",
"jonathan",
"jones",
"jordan",
"jose",
"joseph",
"josh",
"joshua",
"journal",
"journalism",
"journalist",
"journalists",
"journals",
"journey",
"joy",
"joyce",
"jpeg",
"jpg",
"juan",
"judge",
"judges",
"judgment",
"judicial",
"judy",
"juice",
"jul",
"julia",
"julian",
"julie",
"july",
"jump",
"jumping",
"jun",
"junction",
"june",
"jungle",
"junior",
"junk",
"jurisdiction",
"jury",
"just",
"justice",
"justify",
"justin",
"juvenile",
"jvc",
"kai",
"kansas",
"karaoke",
"karen",
"karl",
"karma",
"kate",
"kathy",
"katie",
"katrina",
"kay",
"kazakhstan",
"kde",
"keen",
"keep",
"keeping",
"keeps",
"keith",
"kelkoo",
"kelly",
"ken",
"kennedy",
"kenneth",
"kenny",
"keno",
"kent",
"kentucky",
"kenya",
"kept",
"kernel",
"kerry",
"kevin",
"key",
"keyboard",
"keyboards",
"keys",
"keyword",
"keywords",
"kick",
"kid",
"kidney",
"kids",
"kijiji",
"kill",
"killed",
"killer",
"killing",
"kills",
"kilometers",
"kim",
"kinase",
"kind",
"kinda",
"kinds",
"king",
"kingdom",
"kings",
"kingston",
"kirk",
"kiss",
"kissing",
"kit",
"kitchen",
"kits",
"kitty",
"klein",
"knee",
"knew",
"knife",
"knight",
"knights",
"knit",
"knitting",
"knives",
"knock",
"know",
"knowing",
"knowledge",
"knowledgestorm",
"known",
"knows",
"kodak",
"kong",
"korea",
"korean",
"kruger",
"kurt",
"kuwait",
"kyle",
"lab",
"label",
"labeled",
"labels",
"labor",
"laboratories",
"laboratory",
"labs",
"lace",
"lack",
"ladder",
"laden",
"ladies",
"lady",
"lafayette",
"laid",
"lake",
"lakes",
"lamb",
"lambda",
"lamp",
"lamps",
"lan",
"lancaster",
"lance",
"land",
"landing",
"lands",
"landscape",
"landscapes",
"lane",
"lanes",
"lang",
"language",
"languages",
"lanka",
"laos",
"lap",
"laptop",
"laptops",
"large",
"largely",
"larger",
"largest",
"larry",
"las",
"laser",
"last",
"lasting",
"lat",
"late",
"lately",
"later",
"latest",
"latex",
"latin",
"latina",
"latinas",
"latino",
"latitude",
"latter",
"latvia",
"lauderdale",
"laugh",
"laughing",
"launch",
"launched",
"launches",
"laundry",
"laura",
"lauren",
"law",
"lawn",
"lawrence",
"laws",
"lawsuit",
"lawyer",
"lawyers",
"lay",
"layer",
"layers",
"layout",
"lazy",
"lbs",
"lcd",
"lead",
"leader",
"leaders",
"leadership",
"leading",
"leads",
"leaf",
"league",
"lean",
"learn",
"learned",
"learners",
"learning",
"lease",
"leasing",
"least",
"leather",
"leave",
"leaves",
"leaving",
"lebanon",
"lecture",
"lectures",
"led",
"lee",
"leeds",
"left",
"leg",
"legacy",
"legal",
"legally",
"legend",
"legendary",
"legends",
"legislation",
"legislative",
"legislature",
"legitimate",
"legs",
"leisure",
"lemon",
"len",
"lender",
"lenders",
"lending",
"length",
"lens",
"lenses",
"leo",
"leon",
"leonard",
"leone",
"les",
"lesbian",
"lesbians",
"leslie",
"less",
"lesser",
"lesson",
"lessons",
"let",
"lets",
"letter",
"letters",
"letting",
"leu",
"level",
"levels",
"levitra",
"levy",
"lewis",
"lexington",
"lexmark",
"lexus",
"liabilities",
"liability",
"liable",
"lib",
"liberal",
"liberia",
"liberty",
"librarian",
"libraries",
"library",
"libs",
"licence",
"license",
"licensed",
"licenses",
"licensing",
"licking",
"lid",
"lie",
"liechtenstein",
"lies",
"life",
"lifestyle",
"lifetime",
"lift",
"light",
"lightbox",
"lighter",
"lighting",
"lightning",
"lights",
"lightweight",
"like",
"liked",
"likelihood",
"likely",
"likes",
"likewise",
"lil",
"lime",
"limit",
"limitation",
"limitations",
"limited",
"limiting",
"limits",
"limousines",
"lincoln",
"linda",
"lindsay",
"line",
"linear",
"lined",
"lines",
"lingerie",
"link",
"linked",
"linking",
"links",
"linux",
"lion",
"lions",
"lip",
"lips",
"liquid",
"lisa",
"list",
"listed",
"listen",
"listening",
"listing",
"listings",
"listprice",
"lists",
"lit",
"lite",
"literacy",
"literally",
"literary",
"literature",
"lithuania",
"litigation",
"little",
"live",
"livecam",
"lived",
"liver",
"liverpool",
"lives",
"livestock",
"living",
"liz",
"llc",
"lloyd",
"llp",
"load",
"loaded",
"loading",
"loads",
"loan",
"loans",
"lobby",
"loc",
"local",
"locale",
"locally",
"locate",
"located",
"location",
"locations",
"locator",
"lock",
"locked",
"locking",
"locks",
"lodge",
"lodging",
"log",
"logan",
"logged",
"logging",
"logic",
"logical",
"login",
"logistics",
"logitech",
"logo",
"logos",
"logs",
"lol",
"london",
"lone",
"lonely",
"long",
"longer",
"longest",
"longitude",
"look",
"looked",
"looking",
"looks",
"looksmart",
"lookup",
"loop",
"loops",
"loose",
"lopez",
"lord",
"los",
"lose",
"losing",
"loss",
"losses",
"lost",
"lot",
"lots",
"lottery",
"lotus",
"lou",
"loud",
"louis",
"louise",
"louisiana",
"louisville",
"lounge",
"love",
"loved",
"lovely",
"lover",
"lovers",
"loves",
"loving",
"low",
"lower",
"lowest",
"lows",
"ltd",
"lucas",
"lucia",
"luck",
"lucky",
"lucy",
"luggage",
"luis",
"luke",
"lunch",
"lung",
"luther",
"luxembourg",
"luxury",
"lycos",
"lying",
"lynn",
"lyric",
"lyrics",
"mac",
"macedonia",
"machine",
"machinery",
"machines",
"macintosh",
"macro",
"macromedia",
"mad",
"madagascar",
"made",
"madison",
"madness",
"madonna",
"madrid",
"mae",
"mag",
"magazine",
"magazines",
"magic",
"magical",
"magnet",
"magnetic",
"magnificent",
"magnitude",
"mai",
"maiden",
"mail",
"mailed",
"mailing",
"mailman",
"mails",
"mailto",
"main",
"maine",
"mainland",
"mainly",
"mainstream",
"maintain",
"maintained",
"maintaining",
"maintains",
"maintenance",
"major",
"majority",
"make",
"maker",
"makers",
"makes",
"makeup",
"making",
"malawi",
"malaysia",
"maldives",
"male",
"males",
"mali",
"mall",
"malpractice",
"malta",
"mambo",
"man",
"manage",
"managed",
"management",
"manager",
"managers",
"managing",
"manchester",
"mandate",
"mandatory",
"manga",
"manhattan",
"manitoba",
"manner",
"manor",
"manual",
"manually",
"manuals",
"manufacture",
"manufactured",
"manufacturer",
"manufacturers",
"manufacturing",
"many",
"map",
"maple",
"mapping",
"maps",
"mar",
"marathon",
"marble",
"marc",
"march",
"marco",
"marcus",
"mardi",
"margaret",
"margin",
"maria",
"mariah",
"marie",
"marijuana",
"marilyn",
"marina",
"marine",
"mario",
"marion",
"maritime",
"mark",
"marked",
"marker",
"markers",
"market",
"marketing",
"marketplace",
"markets",
"marking",
"marks",
"marriage",
"married",
"marriott",
"mars",
"marsh",
"marshall",
"mart",
"martha",
"martial",
"martin",
"marvel",
"mary",
"maryland",
"mas",
"mask",
"mason",
"mass",
"massachusetts",
"massage",
"massive",
"master",
"mastercard",
"masters",
"mat",
"match",
"matched",
"matches",
"matching",
"mate",
"material",
"materials",
"maternity",
"math",
"mathematical",
"mathematics",
"mating",
"matrix",
"mats",
"matt",
"matter",
"matters",
"matthew",
"mattress",
"mature",
"maui",
"mauritius",
"max",
"maximize",
"maximum",
"may",
"maybe",
"mayor",
"mazda",
"mba",
"mcdonald",
"meal",
"meals",
"mean",
"meaning",
"meaningful",
"means",
"meant",
"meanwhile",
"measure",
"measured",
"measurement",
"measurements",
"measures",
"measuring",
"meat",
"mechanical",
"mechanics",
"mechanism",
"mechanisms",
"med",
"medal",
"media",
"median",
"mediawiki",
"medicaid",
"medical",
"medicare",
"medication",
"medications",
"medicine",
"medicines",
"medieval",
"meditation",
"mediterranean",
"medium",
"medline",
"meet",
"meeting",
"meetings",
"meets",
"meetup",
"mega",
"mel",
"melbourne",
"melissa",
"mem",
"member",
"members",
"membership",
"membrane",
"memo",
"memorabilia",
"memorial",
"memories",
"memory",
"memphis",
"men",
"mens",
"ment",
"mental",
"mention",
"mentioned",
"mentor",
"menu",
"menus",
"mercedes",
"merchandise",
"merchant",
"merchants",
"mercury",
"mercy",
"mere",
"merely",
"merge",
"merger",
"merit",
"merry",
"mesa",
"mesh",
"mess",
"message",
"messages",
"messaging",
"messenger",
"met",
"meta",
"metabolism",
"metadata",
"metal",
"metallic",
"metallica",
"metals",
"meter",
"meters",
"method",
"methodology",
"methods",
"metres",
"metric",
"metro",
"metropolitan",
"mexican",
"mexico",
"meyer",
"mhz",
"mia",
"miami",
"mic",
"mice",
"michael",
"michel",
"michelle",
"michigan",
"micro",
"microphone",
"microsoft",
"microwave",
"mid",
"middle",
"midi",
"midlands",
"midnight",
"midwest",
"might",
"mighty",
"migration",
"mike",
"mil",
"milan",
"mild",
"mile",
"mileage",
"miles",
"military",
"milk",
"mill",
"millennium",
"miller",
"million",
"millions",
"mills",
"milton",
"milwaukee",
"mime",
"min",
"mind",
"minds",
"mine",
"mineral",
"minerals",
"mines",
"mini",
"miniature",
"minimal",
"minimize",
"minimum",
"mining",
"minister",
"ministers",
"ministries",
"ministry",
"minneapolis",
"minnesota",
"minolta",
"minor",
"minority",
"mins",
"mint",
"minus",
"minute",
"minutes",
"miracle",
"mirror",
"mirrors",
"misc",
"miscellaneous",
"miss",
"missed",
"missile",
"missing",
"mission",
"missions",
"mississippi",
"missouri",
"mistake",
"mistakes",
"mistress",
"mit",
"mitchell",
"mitsubishi",
"mix",
"mixed",
"mixer",
"mixing",
"mixture",
"mlb",
"mls",
"mobile",
"mobiles",
"mobility",
"mod",
"mode",
"model",
"modeling",
"modelling",
"models",
"modem",
"modems",
"moderate",
"moderator",
"moderators",
"modern",
"modes",
"modification",
"modifications",
"modified",
"modify",
"mods",
"modular",
"module",
"modules",
"moisture",
"mold",
"moldova",
"molecular",
"molecules",
"mom",
"moment",
"moments",
"momentum",
"moms",
"mon",
"monaco",
"monday",
"monetary",
"money",
"mongolia",
"monica",
"monitor",
"monitored",
"monitoring",
"monitors",
"monkey",
"mono",
"monroe",
"monster",
"monsters",
"montana",
"monte",
"montgomery",
"month",
"monthly",
"months",
"montreal",
"mood",
"moon",
"moore",
"moral",
"more",
"moreover",
"morgan",
"morning",
"morocco",
"morris",
"morrison",
"mortality",
"mortgage",
"mortgages",
"moscow",
"moses",
"moss",
"most",
"mostly",
"motel",
"motels",
"mother",
"motherboard",
"mothers",
"motion",
"motivated",
"motivation",
"motor",
"motorcycle",
"motorcycles",
"motorola",
"motors",
"mount",
"mountain",
"mountains",
"mounted",
"mounting",
"mounts",
"mouse",
"mouth",
"move",
"moved",
"movement",
"movements",
"movers",
"moves",
"movie",
"movies",
"moving",
"mozambique",
"mozilla",
"mpeg",
"mpegs",
"mpg",
"mph",
"mrna",
"mrs",
"msg",
"msgid",
"msgstr",
"msie",
"msn",
"mtv",
"much",
"mud",
"mug",
"multi",
"multimedia",
"multiple",
"mumbai",
"munich",
"municipal",
"municipality",
"murder",
"murphy",
"murray",
"muscle",
"muscles",
"museum",
"museums",
"music",
"musical",
"musician",
"musicians",
"muslim",
"muslims",
"must",
"mustang",
"mutual",
"muze",
"myanmar",
"myers",
"myrtle",
"myself",
"mysimon",
"myspace",
"mysql",
"mysterious",
"mystery",
"myth",
"nail",
"nails",
"naked",
"nam",
"name",
"named",
"namely",
"names",
"namespace",
"namibia",
"nancy",
"nano",
"naples",
"narrative",
"narrow",
"nasa",
"nascar",
"nasdaq",
"nashville",
"nasty",
"nat",
"nathan",
"nation",
"national",
"nationally",
"nations",
"nationwide",
"native",
"nato",
"natural",
"naturally",
"naturals",
"nature",
"naughty",
"nav",
"naval",
"navigate",
"navigation",
"navigator",
"navy",
"nba",
"nbc",
"ncaa",
"near",
"nearby",
"nearest",
"nearly",
"nebraska",
"nec",
"necessarily",
"necessary",
"necessity",
"neck",
"necklace",
"need",
"needed",
"needle",
"needs",
"negative",
"negotiation",
"negotiations",
"neighbor",
"neighborhood",
"neighbors",
"neil",
"neither",
"nelson",
"neo",
"neon",
"nepal",
"nerve",
"nervous",
"nest",
"nested",
"net",
"netherlands",
"netscape",
"network",
"networking",
"networks",
"neural",
"neutral",
"nevada",
"never",
"nevertheless",
"new",
"newark",
"newbie",
"newcastle",
"newer",
"newest",
"newfoundland",
"newly",
"newman",
"newport",
"news",
"newsletter",
"newsletters",
"newspaper",
"newspapers",
"newton",
"next",
"nextel",
"nfl",
"nhl",
"nhs",
"niagara",
"nicaragua",
"nice",
"nicholas",
"nick",
"nickel",
"nickname",
"nicole",
"niger",
"nigeria",
"night",
"nightlife",
"nightmare",
"nights",
"nike",
"nikon",
"nil",
"nine",
"nintendo",
"nirvana",
"nissan",
"nitrogen",
"noble",
"nobody",
"node",
"nodes",
"noise",
"nokia",
"nominated",
"nomination",
"nominations",
"non",
"none",
"nonprofit",
"noon",
"nor",
"norfolk",
"norm",
"normal",
"normally",
"norman",
"north",
"northeast",
"northern",
"northwest",
"norton",
"norway",
"norwegian",
"nose",
"not",
"note",
"notebook",
"notebooks",
"noted",
"notes",
"nothing",
"notice",
"noticed",
"notices",
"notification",
"notifications",
"notified",
"notify",
"notion",
"notre",
"nottingham",
"nov",
"nova",
"novel",
"novels",
"novelty",
"november",
"now",
"nowhere",
"nsw",
"ntsc",
"nuclear",
"nudist",
"nuke",
"null",
"number",
"numbers",
"numeric",
"numerical",
"numerous",
"nurse",
"nursery",
"nurses",
"nursing",
"nut",
"nutrition",
"nutritional",
"nuts",
"nutten",
"nvidia",
"nyc",
"nylon",
"oak",
"oakland",
"oaks",
"oasis",
"obesity",
"obituaries",
"obj",
"object",
"objective",
"objectives",
"objects",
"obligation",
"obligations",
"observation",
"observations",
"observe",
"observed",
"observer",
"obtain",
"obtained",
"obtaining",
"obvious",
"obviously",
"occasion",
"occasional",
"occasionally",
"occasions",
"occupation",
"occupational",
"occupations",
"occupied",
"occur",
"occurred",
"occurrence",
"occurring",
"occurs",
"ocean",
"oclc",
"oct",
"october",
"odd",
"odds",
"oecd",
"oem",
"off",
"offense",
"offensive",
"offer",
"offered",
"offering",
"offerings",
"offers",
"office",
"officer",
"officers",
"offices",
"official",
"officially",
"officials",
"offline",
"offset",
"offshore",
"often",
"ohio",
"oil",
"oils",
"okay",
"oklahoma",
"old",
"older",
"oldest",
"olive",
"oliver",
"olympic",
"olympics",
"olympus",
"omaha",
"oman",
"omega",
"omissions",
"once",
"one",
"ones",
"ongoing",
"onion",
"online",
"only",
"ons",
"ontario",
"onto",
"ooo",
"oops",
"open",
"opened",
"opening",
"openings",
"opens",
"opera",
"operate",
"operated",
"operates",
"operating",
"operation",
"operational",
"operations",
"operator",
"operators",
"opinion",
"opinions",
"opponent",
"opponents",
"opportunities",
"opportunity",
"opposed",
"opposite",
"opposition",
"opt",
"optical",
"optics",
"optimal",
"optimization",
"optimize",
"optimum",
"option",
"optional",
"options",
"oracle",
"oral",
"orange",
"orbit",
"orchestra",
"order",
"ordered",
"ordering",
"orders",
"ordinance",
"ordinary",
"oregon",
"org",
"organ",
"organic",
"organisation",
"organisations",
"organisms",
"organization",
"organizational",
"organizations",
"organize",
"organized",
"organizer",
"organizing",
"oriental",
"orientation",
"oriented",
"origin",
"original",
"originally",
"origins",
"orlando",
"orleans",
"oscar",
"other",
"others",
"otherwise",
"ottawa",
"ought",
"our",
"ours",
"ourselves",
"out",
"outcome",
"outcomes",
"outdoor",
"outdoors",
"outer",
"outlet",
"outlets",
"outline",
"outlined",
"outlook",
"output",
"outputs",
"outreach",
"outside",
"outsourcing",
"outstanding",
"oval",
"oven",
"over",
"overall",
"overcome",
"overhead",
"overnight",
"overseas",
"overview",
"owen",
"own",
"owned",
"owner",
"owners",
"ownership",
"owns",
"oxford",
"oxide",
"oxygen",
"ozone",
"pac",
"pace",
"pacific",
"pack",
"package",
"packages",
"packaging",
"packard",
"packed",
"packet",
"packets",
"packing",
"packs",
"pad",
"pads",
"page",
"pages",
"paid",
"pain",
"painful",
"paint",
"paintball",
"painted",
"painting",
"paintings",
"pair",
"pairs",
"pakistan",
"pal",
"palace",
"pale",
"palestine",
"palestinian",
"palm",
"palmer",
"pam",
"pamela",
"pan",
"panama",
"panasonic",
"panel",
"panels",
"panic",
"pants",
"pantyhose",
"paper",
"paperback",
"paperbacks",
"papers",
"papua",
"par",
"para",
"parade",
"paradise",
"paragraph",
"paragraphs",
"paraguay",
"parallel",
"parameter",
"parameters",
"parcel",
"parent",
"parental",
"parenting",
"parents",
"paris",
"parish",
"park",
"parker",
"parking",
"parks",
"parliament",
"parliamentary",
"part",
"partial",
"partially",
"participant",
"participants",
"participate",
"participated",
"participating",
"participation",
"particle",
"particles",
"particular",
"particularly",
"parties",
"partition",
"partly",
"partner",
"partners",
"partnership",
"partnerships",
"parts",
"party",
"pas",
"paso",
"pass",
"passage",
"passed",
"passenger",
"passengers",
"passes",
"passing",
"passion",
"passive",
"passport",
"password",
"passwords",
"past",
"pasta",
"paste",
"pastor",
"pat",
"patch",
"patches",
"patent",
"patents",
"path",
"pathology",
"paths",
"patient",
"patients",
"patio",
"patricia",
"patrick",
"patrol",
"pattern",
"patterns",
"paul",
"pavilion",
"paxil",
"pay",
"payable",
"payday",
"paying",
"payment",
"payments",
"paypal",
"payroll",
"pays",
"pci",
"pcs",
"pct",
"pda",
"pdas",
"pdf",
"pdt",
"peace",
"peaceful",
"peak",
"pearl",
"peas",
"pediatric",
"pee",
"peeing",
"peer",
"peers",
"pen",
"penalties",
"penalty",
"pencil",
"pendant",
"pending",
"penetration",
"penguin",
"peninsula",
"penn",
"pennsylvania",
"penny",
"pens",
"pension",
"pensions",
"pentium",
"people",
"peoples",
"pepper",
"per",
"perceived",
"percent",
"percentage",
"perception",
"perfect",
"perfectly",
"perform",
"performance",
"performances",
"performed",
"performer",
"performing",
"performs",
"perfume",
"perhaps",
"period",
"periodic",
"periodically",
"periods",
"peripheral",
"peripherals",
"perl",
"permalink",
"permanent",
"permission",
"permissions",
"permit",
"permits",
"permitted",
"perry",
"persian",
"persistent",
"person",
"personal",
"personality",
"personalized",
"personally",
"personals",
"personnel",
"persons",
"perspective",
"perspectives",
"perth",
"peru",
"pest",
"pet",
"pete",
"peter",
"petersburg",
"peterson",
"petite",
"petition",
"petroleum",
"pets",
"pgp",
"phantom",
"pharmaceutical",
"pharmaceuticals",
"pharmacies",
"pharmacology",
"pharmacy",
"phase",
"phases",
"phd",
"phenomenon",
"phentermine",
"phi",
"phil",
"philadelphia",
"philip",
"philippines",
"philips",
"phillips",
"philosophy",
"phoenix",
"phone",
"phones",
"photo",
"photograph",
"photographer",
"photographers",
"photographic",
"photographs",
"photography",
"photos",
"photoshop",
"php",
"phpbb",
"phrase",
"phrases",
"phys",
"physical",
"physically",
"physician",
"physicians",
"physics",
"physiology",
"piano",
"pic",
"pichunter",
"pick",
"picked",
"picking",
"picks",
"pickup",
"picnic",
"pics",
"picture",
"pictures",
"pie",
"piece",
"pieces",
"pierce",
"pierre",
"pig",
"pike",
"pill",
"pillow",
"pills",
"pilot",
"pin",
"pine",
"ping",
"pink",
"pins",
"pioneer",
"pipe",
"pipeline",
"pipes",
"pirates",
"pit",
"pitch",
"pittsburgh",
"pix",
"pixel",
"pixels",
"pizza",
"place",
"placed",
"placement",
"places",
"placing",
"plain",
"plains",
"plaintiff",
"plan",
"plane",
"planes",
"planet",
"planets",
"planned",
"planner",
"planners",
"planning",
"plans",
"plant",
"plants",
"plasma",
"plastic",
"plastics",
"plate",
"plates",
"platform",
"platforms",
"platinum",
"play",
"playback",
"played",
"player",
"players",
"playing",
"playlist",
"plays",
"playstation",
"plaza",
"plc",
"pleasant",
"please",
"pleased",
"pleasure",
"pledge",
"plenty",
"plot",
"plots",
"plug",
"plugin",
"plugins",
"plumbing",
"plus",
"plymouth",
"pmc",
"pmid",
"pocket",
"pockets",
"pod",
"podcast",
"podcasts",
"poem",
"poems",
"poet",
"poetry",
"point",
"pointed",
"pointer",
"pointing",
"points",
"poison",
"pokemon",
"poker",
"poland",
"polar",
"pole",
"police",
"policies",
"policy",
"polish",
"polished",
"political",
"politicians",
"politics",
"poll",
"polls",
"pollution",
"polo",
"poly",
"polyester",
"polymer",
"polyphonic",
"pond",
"pontiac",
"pool",
"pools",
"poor",
"pop",
"pope",
"popular",
"popularity",
"population",
"populations",
"por",
"porcelain",
"pork",
"porsche",
"port",
"portable",
"portal",
"porter",
"portfolio",
"portion",
"portions",
"portland",
"portrait",
"portraits",
"ports",
"portsmouth",
"portugal",
"portuguese",
"pos",
"pose",
"posing",
"position",
"positioning",
"positions",
"positive",
"possess",
"possession",
"possibilities",
"possibility",
"possible",
"possibly",
"post",
"postage",
"postal",
"postcard",
"postcards",
"posted",
"poster",
"posters",
"posting",
"postings",
"postposted",
"posts",
"pot",
"potato",
"potatoes",
"potential",
"potentially",
"potter",
"pottery",
"poultry",
"pound",
"pounds",
"pour",
"poverty",
"powder",
"powell",
"power",
"powered",
"powerful",
"powerpoint",
"powers",
"powerseller",
"ppc",
"ppm",
"practical",
"practice",
"practices",
"practitioner",
"practitioners",
"prague",
"prairie",
"praise",
"pray",
"prayer",
"prayers",
"pre",
"preceding",
"precious",
"precipitation",
"precise",
"precisely",
"precision",
"predict",
"predicted",
"prediction",
"predictions",
"prefer",
"preference",
"preferences",
"preferred",
"prefers",
"prefix",
"pregnancy",
"pregnant",
"preliminary",
"premier",
"premiere",
"premises",
"premium",
"prep",
"prepaid",
"preparation",
"prepare",
"prepared",
"preparing",
"prerequisite",
"prescribed",
"prescription",
"presence",
"present",
"presentation",
"presentations",
"presented",
"presenting",
"presently",
"presents",
"preservation",
"preserve",
"president",
"presidential",
"press",
"pressed",
"pressing",
"pressure",
"preston",
"pretty",
"prev",
"prevent",
"preventing",
"prevention",
"preview",
"previews",
"previous",
"previously",
"price",
"priced",
"prices",
"pricing",
"pride",
"priest",
"primarily",
"primary",
"prime",
"prince",
"princess",
"princeton",
"principal",
"principle",
"principles",
"print",
"printable",
"printed",
"printer",
"printers",
"printing",
"prints",
"prior",
"priorities",
"priority",
"prison",
"prisoner",
"prisoners",
"privacy",
"private",
"privilege",
"privileges",
"prix",
"prize",
"prizes",
"pro",
"probability",
"probably",
"probe",
"problem",
"problems",
"proc",
"procedure",
"procedures",
"proceed",
"proceeding",
"proceedings",
"proceeds",
"process",
"processed",
"processes",
"processing",
"processor",
"processors",
"procurement",
"produce",
"produced",
"producer",
"producers",
"produces",
"producing",
"product",
"production",
"productions",
"productive",
"productivity",
"products",
"profession",
"professional",
"professionals",
"professor",
"profile",
"profiles",
"profit",
"profits",
"program",
"programme",
"programmer",
"programmers",
"programmes",
"programming",
"programs",
"progress",
"progressive",
"prohibited",
"project",
"projected",
"projection",
"projector",
"projectors",
"projects",
"prominent",
"promise",
"promised",
"promises",
"promising",
"promo",
"promote",
"promoted",
"promotes",
"promoting",
"promotion",
"promotional",
"promotions",
"prompt",
"promptly",
"proof",
"propecia",
"proper",
"properly",
"properties",
"property",
"prophet",
"proportion",
"proposal",
"proposals",
"propose",
"proposed",
"proposition",
"proprietary",
"pros",
"prospect",
"prospective",
"prospects",
"prostate",
"prostores",
"prot",
"protect",
"protected",
"protecting",
"protection",
"protective",
"protein",
"proteins",
"protest",
"protocol",
"protocols",
"prototype",
"proud",
"proudly",
"prove",
"proved",
"proven",
"provide",
"provided",
"providence",
"provider",
"providers",
"provides",
"providing",
"province",
"provinces",
"provincial",
"provision",
"provisions",
"proxy",
"prozac",
"psi",
"psp",
"pst",
"psychiatry",
"psychological",
"psychology",
"pts",
"pty",
"pub",
"public",
"publication",
"publications",
"publicity",
"publicly",
"publish",
"published",
"publisher",
"publishers",
"publishing",
"pubmed",
"pubs",
"puerto",
"pull",
"pulled",
"pulling",
"pulse",
"pump",
"pumps",
"punch",
"punishment",
"punk",
"pupils",
"puppy",
"purchase",
"purchased",
"purchases",
"purchasing",
"pure",
"purple",
"purpose",
"purposes",
"purse",
"pursuant",
"pursue",
"pursuit",
"push",
"pushed",
"pushing",
"put",
"puts",
"putting",
"puzzle",
"puzzles",
"pvc",
"python",
"qatar",
"qld",
"qty",
"quad",
"qualification",
"qualifications",
"qualified",
"qualify",
"qualifying",
"qualities",
"quality",
"quantitative",
"quantities",
"quantity",
"quantum",
"quarter",
"quarterly",
"quarters",
"que",
"quebec",
"queen",
"queens",
"queensland",
"queries",
"query",
"quest",
"question",
"questionnaire",
"questions",
"queue",
"qui",
"quick",
"quickly",
"quiet",
"quilt",
"quit",
"quite",
"quiz",
"quizzes",
"quotations",
"quote",
"quoted",
"quotes",
"rabbit",
"race",
"races",
"rachel",
"racial",
"racing",
"rack",
"racks",
"radar",
"radiation",
"radical",
"radio",
"radios",
"radius",
"rage",
"raid",
"rail",
"railroad",
"railway",
"rain",
"rainbow",
"raise",
"raised",
"raises",
"raising",
"raleigh",
"rally",
"ralph",
"ram",
"ran",
"ranch",
"rand",
"random",
"randy",
"range",
"ranger",
"rangers",
"ranges",
"ranging",
"rank",
"ranked",
"ranking",
"rankings",
"ranks",
"rap",
"rapid",
"rapidly",
"rapids",
"rare",
"rarely",
"rat",
"rate",
"rated",
"rates",
"rather",
"rating",
"ratings",
"ratio",
"rational",
"ratios",
"rats",
"raw",
"ray",
"raymond",
"rays",
"rca",
"reach",
"reached",
"reaches",
"reaching",
"reaction",
"reactions",
"read",
"reader",
"readers",
"readily",
"reading",
"readings",
"reads",
"ready",
"real",
"realistic",
"reality",
"realize",
"realized",
"really",
"realm",
"realtor",
"realtors",
"realty",
"rear",
"reason",
"reasonable",
"reasonably",
"reasoning",
"reasons",
"rebate",
"rebates",
"rebecca",
"rebel",
"rebound",
"rec",
"recall",
"receipt",
"receive",
"received",
"receiver",
"receivers",
"receives",
"receiving",
"recent",
"recently",
"reception",
"receptor",
"receptors",
"recipe",
"recipes",
"recipient",
"recipients",
"recognition",
"recognize",
"recognized",
"recommend",
"recommendation",
"recommendations",
"recommended",
"recommends",
"reconstruction",
"record",
"recorded",
"recorder",
"recorders",
"recording",
"recordings",
"records",
"recover",
"recovered",
"recovery",
"recreation",
"recreational",
"recruiting",
"recruitment",
"recycling",
"red",
"redeem",
"redhead",
"reduce",
"reduced",
"reduces",
"reducing",
"reduction",
"reductions",
"reed",
"reef",
"reel",
"ref",
"refer",
"reference",
"referenced",
"references",
"referral",
"referrals",
"referred",
"referring",
"refers",
"refinance",
"refine",
"refined",
"reflect",
"reflected",
"reflection",
"reflections",
"reflects",
"reform",
"reforms",
"refresh",
"refrigerator",
"refugees",
"refund",
"refurbished",
"refuse",
"refused",
"reg",
"regard",
"regarded",
"regarding",
"regardless",
"regards",
"reggae",
"regime",
"region",
"regional",
"regions",
"register",
"registered",
"registrar",
"registration",
"registry",
"regression",
"regular",
"regularly",
"regulated",
"regulation",
"regulations",
"regulatory",
"rehab",
"rehabilitation",
"reid",
"reject",
"rejected",
"relate",
"related",
"relates",
"relating",
"relation",
"relations",
"relationship",
"relationships",
"relative",
"relatively",
"relatives",
"relax",
"relaxation",
"relay",
"release",
"released",
"releases",
"relevance",
"relevant",
"reliability",
"reliable",
"reliance",
"relief",
"religion",
"religions",
"religious",
"reload",
"relocation",
"rely",
"relying",
"remain",
"remainder",
"remained",
"remaining",
"remains",
"remark",
"remarkable",
"remarks",
"remedies",
"remedy",
"remember",
"remembered",
"remind",
"reminder",
"remix",
"remote",
"removable",
"removal",
"remove",
"removed",
"removing",
"renaissance",
"render",
"rendered",
"rendering",
"renew",
"renewable",
"renewal",
"reno",
"rent",
"rental",
"rentals",
"rep",
"repair",
"repairs",
"repeat",
"repeated",
"replace",
"replaced",
"replacement",
"replacing",
"replica",
"replication",
"replied",
"replies",
"reply",
"report",
"reported",
"reporter",
"reporters",
"reporting",
"reports",
"repository",
"represent",
"representation",
"representations",
"representative",
"representatives",
"represented",
"representing",
"represents",
"reprint",
"reprints",
"reproduce",
"reproduced",
"reproduction",
"reproductive",
"republic",
"republican",
"republicans",
"reputation",
"request",
"requested",
"requesting",
"requests",
"require",
"required",
"requirement",
"requirements",
"requires",
"requiring",
"res",
"rescue",
"research",
"researcher",
"researchers",
"reseller",
"reservation",
"reservations",
"reserve",
"reserved",
"reserves",
"reservoir",
"reset",
"residence",
"resident",
"residential",
"residents",
"resist",
"resistance",
"resistant",
"resolution",
"resolutions",
"resolve",
"resolved",
"resort",
"resorts",
"resource",
"resources",
"respect",
"respected",
"respective",
"respectively",
"respiratory",
"respond",
"responded",
"respondent",
"respondents",
"responding",
"response",
"responses",
"responsibilities",
"responsibility",
"responsible",
"rest",
"restaurant",
"restaurants",
"restoration",
"restore",
"restored",
"restrict",
"restricted",
"restriction",
"restrictions",
"restructuring",
"result",
"resulted",
"resulting",
"results",
"resume",
"resumes",
"retail",
"retailer",
"retailers",
"retain",
"retained",
"retention",
"retired",
"retirement",
"retreat",
"retrieval",
"retrieve",
"retrieved",
"retro",
"return",
"returned",
"returning",
"returns",
"reunion",
"reuters",
"rev",
"reveal",
"revealed",
"reveals",
"revelation",
"revenge",
"revenue",
"revenues",
"reverse",
"review",
"reviewed",
"reviewer",
"reviewing",
"reviews",
"revised",
"revision",
"revisions",
"revolution",
"revolutionary",
"reward",
"rewards",
"reynolds",
"rfc",
"rhode",
"rhythm",
"ribbon",
"rica",
"rice",
"rich",
"richard",
"richards",
"richardson",
"richmond",
"rick",
"ricky",
"rico",
"rid",
"ride",
"rider",
"riders",
"rides",
"ridge",
"riding",
"right",
"rights",
"rim",
"ring",
"rings",
"ringtone",
"ringtones",
"rio",
"rip",
"ripe",
"rise",
"rising",
"risk",
"risks",
"river",
"rivers",
"riverside",
"rna",
"road",
"roads",
"rob",
"robbie",
"robert",
"roberts",
"robertson",
"robin",
"robinson",
"robot",
"robots",
"robust",
"rochester",
"rock",
"rocket",
"rocks",
"rocky",
"rod",
"roger",
"rogers",
"roland",
"role",
"roles",
"roll",
"rolled",
"roller",
"rolling",
"rolls",
"rom",
"roman",
"romance",
"romania",
"romantic",
"rome",
"ron",
"ronald",
"roof",
"room",
"roommate",
"roommates",
"rooms",
"root",
"roots",
"rope",
"rosa",
"rose",
"roses",
"ross",
"roster",
"rotary",
"rotation",
"rouge",
"rough",
"roughly",
"roulette",
"round",
"rounds",
"route",
"router",
"routers",
"routes",
"routine",
"routines",
"routing",
"rover",
"row",
"rows",
"roy",
"royal",
"royalty",
"rpg",
"rpm",
"rrp",
"rss",
"rubber",
"ruby",
"rug",
"rugby",
"rugs",
"rule",
"ruled",
"rules",
"ruling",
"run",
"runner",
"running",
"runs",
"runtime",
"rural",
"rush",
"russell",
"russia",
"russian",
"ruth",
"rwanda",
"ryan",
"sacramento",
"sacred",
"sacrifice",
"sad",
"saddam",
"safari",
"safe",
"safely",
"safer",
"safety",
"sage",
"sagem",
"said",
"sail",
"sailing",
"saint",
"saints",
"sake",
"salad",
"salaries",
"salary",
"sale",
"salem",
"sales",
"sally",
"salmon",
"salon",
"salt",
"salvador",
"salvation",
"sam",
"samba",
"same",
"samoa",
"sample",
"samples",
"sampling",
"samsung",
"samuel",
"san",
"sand",
"sandra",
"sandwich",
"sandy",
"sans",
"santa",
"sanyo",
"sao",
"sap",
"sapphire",
"sara",
"sarah",
"sas",
"saskatchewan",
"sat",
"satellite",
"satin",
"satisfaction",
"satisfactory",
"satisfied",
"satisfy",
"saturday",
"saturn",
"sauce",
"saudi",
"savage",
"savannah",
"save",
"saved",
"saver",
"saves",
"saving",
"savings",
"saw",
"say",
"saying",
"says",
"sbjct",
"scale",
"scales",
"scan",
"scanned",
"scanner",
"scanners",
"scanning",
"scared",
"scary",
"scenario",
"scenarios",
"scene",
"scenes",
"scenic",
"schedule",
"scheduled",
"schedules",
"scheduling",
"schema",
"scheme",
"schemes",
"scholar",
"scholars",
"scholarship",
"scholarships",
"school",
"schools",
"sci",
"science",
"sciences",
"scientific",
"scientist",
"scientists",
"scoop",
"scope",
"score",
"scored",
"scores",
"scoring",
"scotia",
"scotland",
"scott",
"scottish",
"scout",
"scratch",
"screen",
"screening",
"screens",
"screensaver",
"screensavers",
"screenshot",
"screenshots",
"screw",
"script",
"scripting",
"scripts",
"scroll",
"scsi",
"scuba",
"sculpture",
"sea",
"seafood",
"seal",
"sealed",
"sean",
"search",
"searched",
"searches",
"searching",
"seas",
"season",
"seasonal",
"seasons",
"seat",
"seating",
"seats",
"seattle",
"sec",
"second",
"secondary",
"seconds",
"secret",
"secretariat",
"secretary",
"secrets",
"section",
"sections",
"sector",
"sectors",
"secure",
"secured",
"securely",
"securities",
"security",
"see",
"seed",
"seeds",
"seeing",
"seek",
"seeker",
"seekers",
"seeking",
"seeks",
"seem",
"seemed",
"seems",
"seen",
"sees",
"sega",
"segment",
"segments",
"select",
"selected",
"selecting",
"selection",
"selections",
"selective",
"self",
"sell",
"seller",
"sellers",
"selling",
"sells",
"semester",
"semi",
"semiconductor",
"seminar",
"seminars",
"sen",
"senate",
"senator",
"senators",
"send",
"sender",
"sending",
"sends",
"senegal",
"senior",
"seniors",
"sense",
"sensitive",
"sensitivity",
"sensor",
"sensors",
"sent",
"sentence",
"sentences",
"seo",
"sep",
"separate",
"separated",
"separately",
"separation",
"sept",
"september",
"seq",
"sequence",
"sequences",
"ser",
"serbia",
"serial",
"series",
"serious",
"seriously",
"serum",
"serve",
"served",
"server",
"servers",
"serves",
"service",
"services",
"serving",
"session",
"sessions",
"set",
"sets",
"setting",
"settings",
"settle",
"settled",
"settlement",
"setup",
"seven",
"seventh",
"several",
"severe",
"sewing",
"sexual",
"sexuality",
"sexually",
"shade",
"shades",
"shadow",
"shadows",
"shaft",
"shake",
"shakespeare",
"shakira",
"shall",
"shame",
"shanghai",
"shannon",
"shape",
"shaped",
"shapes",
"share",
"shared",
"shareholders",
"shares",
"shareware",
"sharing",
"shark",
"sharon",
"sharp",
"shaved",
"shaw",
"she",
"shed",
"sheep",
"sheer",
"sheet",
"sheets",
"sheffield",
"shelf",
"shell",
"shelter",
"shepherd",
"sheriff",
"sherman",
"shield",
"shift",
"shine",
"ship",
"shipment",
"shipments",
"shipped",
"shipping",
"ships",
"shirt",
"shirts",
"shock",
"shoe",
"shoes",
"shoot",
"shooting",
"shop",
"shopper",
"shoppers",
"shopping",
"shops",
"shopzilla",
"shore",
"short",
"shortcuts",
"shorter",
"shortly",
"shorts",
"shot",
"shots",
"should",
"shoulder",
"show",
"showcase",
"showed",
"shower",
"showers",
"showing",
"shown",
"shows",
"showtimes",
"shut",
"shuttle",
"sic",
"sick",
"side",
"sides",
"sie",
"siemens",
"sierra",
"sig",
"sight",
"sigma",
"sign",
"signal",
"signals",
"signature",
"signatures",
"signed",
"significance",
"significant",
"significantly",
"signing",
"signs",
"signup",
"silence",
"silent",
"silicon",
"silk",
"silly",
"silver",
"sim",
"similar",
"similarly",
"simon",
"simple",
"simplified",
"simply",
"simpson",
"simpsons",
"sims",
"simulation",
"simulations",
"simultaneously",
"sin",
"since",
"sing",
"singapore",
"singer",
"singh",
"singing",
"single",
"singles",
"sink",
"sip",
"sir",
"sister",
"sisters",
"sit",
"site",
"sitemap",
"sites",
"sitting",
"situated",
"situation",
"situations",
"six",
"sixth",
"size",
"sized",
"sizes",
"skating",
"ski",
"skiing",
"skill",
"skilled",
"skills",
"skin",
"skins",
"skip",
"skirt",
"skirts",
"sku",
"sky",
"skype",
"slave",
"sleep",
"sleeping",
"sleeps",
"sleeve",
"slide",
"slides",
"slideshow",
"slight",
"slightly",
"slim",
"slip",
"slope",
"slot",
"slots",
"slovak",
"slovakia",
"slovenia",
"slow",
"slowly",
"small",
"smaller",
"smallest",
"smart",
"smell",
"smile",
"smilies",
"smith",
"smithsonian",
"smoke",
"smoking",
"smooth",
"sms",
"smtp",
"snake",
"snap",
"snapshot",
"snow",
"snowboard",
"soa",
"soap",
"soc",
"soccer",
"social",
"societies",
"society",
"sociology",
"socket",
"socks",
"sodium",
"sofa",
"soft",
"softball",
"software",
"soil",
"sol",
"solar",
"solaris",
"sold",
"soldier",
"soldiers",
"sole",
"solely",
"solid",
"solo",
"solomon",
"solution",
"solutions",
"solve",
"solved",
"solving",
"soma",
"somalia",
"some",
"somebody",
"somehow",
"someone",
"somerset",
"something",
"sometimes",
"somewhat",
"somewhere",
"son",
"song",
"songs",
"sonic",
"sons",
"sony",
"soon",
"soonest",
"sophisticated",
"sorry",
"sort",
"sorted",
"sorts",
"sought",
"soul",
"souls",
"sound",
"sounds",
"soundtrack",
"soup",
"source",
"sources",
"south",
"southampton",
"southeast",
"southern",
"southwest",
"soviet",
"sox",
"spa",
"space",
"spaces",
"spain",
"spam",
"span",
"spanish",
"spank",
"spanking",
"sparc",
"spare",
"spas",
"spatial",
"speak",
"speaker",
"speakers",
"speaking",
"speaks",
"spears",
"spec",
"special",
"specialist",
"specialists",
"specialized",
"specializing",
"specially",
"specials",
"specialties",
"specialty",
"species",
"specific",
"specifically",
"specification",
"specifications",
"specifics",
"specified",
"specifies",
"specify",
"specs",
"spectacular",
"spectrum",
"speech",
"speeches",
"speed",
"speeds",
"spell",
"spelling",
"spencer",
"spend",
"spending",
"spent",
"sperm",
"sphere",
"spice",
"spider",
"spies",
"spin",
"spine",
"spirit",
"spirits",
"spiritual",
"spirituality",
"split",
"spoke",
"spoken",
"spokesman",
"sponsor",
"sponsored",
"sponsors",
"sponsorship",
"sport",
"sporting",
"sports",
"spot",
"spotlight",
"spots",
"spouse",
"spray",
"spread",
"spreading",
"spring",
"springer",
"springfield",
"springs",
"sprint",
"spy",
"spyware",
"sql",
"squad",
"square",
"src",
"sri",
"ssl",
"stability",
"stable",
"stack",
"stadium",
"staff",
"staffing",
"stage",
"stages",
"stainless",
"stake",
"stakeholders",
"stamp",
"stamps",
"stan",
"stand",
"standard",
"standards",
"standing",
"standings",
"stands",
"stanford",
"stanley",
"star",
"starring",
"stars",
"starsmerchant",
"start",
"started",
"starter",
"starting",
"starts",
"startup",
"stat",
"state",
"stated",
"statement",
"statements",
"states",
"statewide",
"static",
"stating",
"station",
"stationery",
"stations",
"statistical",
"statistics",
"stats",
"status",
"statute",
"statutes",
"statutory",
"stay",
"stayed",
"staying",
"stays",
"std",
"ste",
"steady",
"steal",
"steam",
"steel",
"steering",
"stem",
"step",
"stephanie",
"stephen",
"steps",
"stereo",
"sterling",
"steve",
"steven",
"stevens",
"stewart",
"stick",
"sticker",
"stickers",
"sticks",
"sticky",
"still",
"stock",
"stockholm",
"stockings",
"stocks",
"stolen",
"stomach",
"stone",
"stones",
"stood",
"stop",
"stopped",
"stopping",
"stops",
"storage",
"store",
"stored",
"stores",
"stories",
"storm",
"story",
"str",
"straight",
"strain",
"strand",
"strange",
"stranger",
"strap",
"strategic",
"strategies",
"strategy",
"stream",
"streaming",
"streams",
"street",
"streets",
"strength",
"strengthen",
"strengthening",
"strengths",
"stress",
"stretch",
"strict",
"strictly",
"strike",
"strikes",
"striking",
"string",
"strings",
"strip",
"stripes",
"strips",
"stroke",
"strong",
"stronger",
"strongly",
"struck",
"struct",
"structural",
"structure",
"structured",
"structures",
"struggle",
"stuart",
"stuck",
"stud",
"student",
"students",
"studied",
"studies",
"studio",
"studios",
"study",
"studying",
"stuff",
"stuffed",
"stunning",
"stupid",
"style",
"styles",
"stylish",
"stylus",
"sub",
"subaru",
"subcommittee",
"subdivision",
"subject",
"subjective",
"subjects",
"sublime",
"sublimedirectory",
"submission",
"submissions",
"submit",
"submitted",
"submitting",
"subscribe",
"subscriber",
"subscribers",
"subscription",
"subscriptions",
"subsection",
"subsequent",
"subsequently",
"subsidiaries",
"subsidiary",
"substance",
"substances",
"substantial",
"substantially",
"substitute",
"subtle",
"suburban",
"succeed",
"success",
"successful",
"successfully",
"such",
"sucking",
"sudan",
"sudden",
"suddenly",
"sue",
"suffer",
"suffered",
"suffering",
"sufficient",
"sufficiently",
"sugar",
"suggest",
"suggested",
"suggesting",
"suggestion",
"suggestions",
"suggests",
"suicide",
"suit",
"suitable",
"suite",
"suited",
"suites",
"suits",
"sullivan",
"sum",
"summaries",
"summary",
"summer",
"summit",
"sun",
"sunday",
"sunglasses",
"sunny",
"sunrise",
"sunset",
"sunshine",
"super",
"superb",
"superintendent",
"superior",
"supervision",
"supervisor",
"supervisors",
"supplement",
"supplemental",
"supplements",
"supplied",
"supplier",
"suppliers",
"supplies",
"supply",
"support",
"supported",
"supporters",
"supporting",
"supports",
"suppose",
"supposed",
"supreme",
"sur",
"sure",
"surely",
"surf",
"surface",
"surfaces",
"surfing",
"surge",
"surgeon",
"surgeons",
"surgery",
"surgical",
"surname",
"surplus",
"surprise",
"surprised",
"surprising",
"surrey",
"surround",
"surrounded",
"surrounding",
"surveillance",
"survey",
"surveys",
"survival",
"survive",
"survivor",
"survivors",
"susan",
"suse",
"suspect",
"suspected",
"suspended",
"suspension",
"sussex",
"sustainability",
"sustainable",
"sustained",
"suzuki",
"swap",
"swaziland",
"sweden",
"swedish",
"sweet",
"swift",
"swim",
"swimming",
"swing",
"swingers",
"swiss",
"switch",
"switched",
"switches",
"switching",
"switzerland",
"sword",
"sydney",
"symantec",
"symbol",
"symbols",
"sympathy",
"symphony",
"symposium",
"symptoms",
"sync",
"syndicate",
"syndication",
"syndrome",
"synopsis",
"syntax",
"synthesis",
"synthetic",
"syracuse",
"syria",
"sys",
"system",
"systematic",
"systems",
"tab",
"table",
"tables",
"tablet",
"tablets",
"tabs",
"tackle",
"tactics",
"tag",
"tagged",
"tags",
"tahoe",
"tail",
"taiwan",
"take",
"taken",
"takes",
"taking",
"tale",
"talent",
"talented",
"tales",
"talk",
"talked",
"talking",
"talks",
"tall",
"tamil",
"tampa",
"tan",
"tank",
"tanks",
"tanzania",
"tap",
"tape",
"tapes",
"tar",
"target",
"targeted",
"targets",
"tariff",
"task",
"tasks",
"taste",
"tattoo",
"taught",
"tax",
"taxation",
"taxes",
"taxi",
"taylor",
"tba",
"tcp",
"tea",
"teach",
"teacher",
"teachers",
"teaches",
"teaching",
"team",
"teams",
"tear",
"tears",
"tech",
"technical",
"technician",
"technique",
"techniques",
"techno",
"technological",
"technologies",
"technology",
"techrepublic",
"ted",
"teddy",
"tee",
"teen",
"teenage",
"teens",
"teeth",
"tel",
"telecharger",
"telecom",
"telecommunications",
"telephone",
"telephony",
"telescope",
"television",
"televisions",
"tell",
"telling",
"tells",
"temp",
"temperature",
"temperatures",
"template",
"templates",
"temple",
"temporal",
"temporarily",
"temporary",
"ten",
"tenant",
"tend",
"tender",
"tennessee",
"tennis",
"tension",
"tent",
"term",
"terminal",
"terminals",
"termination",
"terminology",
"terms",
"terrace",
"terrain",
"terrible",
"territories",
"territory",
"terror",
"terrorism",
"terrorist",
"terrorists",
"terry",
"test",
"testament",
"tested",
"testimonials",
"testimony",
"testing",
"tests",
"tex",
"texas",
"text",
"textbook",
"textbooks",
"textile",
"textiles",
"texts",
"texture",
"tft",
"tgp",
"thai",
"thailand",
"than",
"thank",
"thanks",
"thanksgiving",
"that",
"thats",
"the",
"theater",
"theaters",
"theatre",
"thee",
"theft",
"thehun",
"their",
"them",
"theme",
"themes",
"themselves",
"then",
"theology",
"theorem",
"theoretical",
"theories",
"theory",
"therapeutic",
"therapist",
"therapy",
"there",
"thereafter",
"thereby",
"therefore",
"thereof",
"thermal",
"thesaurus",
"these",
"thesis",
"theta",
"they",
"thick",
"thickness",
"thin",
"thing",
"things",
"think",
"thinking",
"thinkpad",
"thinks",
"third",
"thirty",
"this",
"thomas",
"thompson",
"thomson",
"thong",
"thongs",
"thorough",
"thoroughly",
"those",
"thou",
"though",
"thought",
"thoughts",
"thousand",
"thousands",
"thread",
"threaded",
"threads",
"threat",
"threatened",
"threatening",
"threats",
"three",
"threshold",
"thriller",
"throat",
"through",
"throughout",
"throw",
"throwing",
"thrown",
"throws",
"thru",
"thu",
"thumb",
"thumbnail",
"thumbnails",
"thumbs",
"thumbzilla",
"thunder",
"thursday",
"thus",
"thy",
"ticket",
"tickets",
"tide",
"tie",
"tied",
"tier",
"ties",
"tiffany",
"tiger",
"tigers",
"tight",
"til",
"tile",
"tiles",
"till",
"tim",
"timber",
"time",
"timeline",
"timely",
"timer",
"times",
"timing",
"timothy",
"tin",
"tiny",
"tion",
"tions",
"tip",
"tips",
"tire",
"tired",
"tires",
"tissue",
"titanium",
"titans",
"title",
"titled",
"titles",
"titten",
"tmp",
"tobacco",
"tobago",
"today",
"todd",
"toddler",
"toe",
"together",
"toilet",
"token",
"tokyo",
"told",
"tolerance",
"toll",
"tom",
"tomato",
"tomatoes",
"tommy",
"tomorrow",
"ton",
"tone",
"toner",
"tones",
"tongue",
"tonight",
"tons",
"tony",
"too",
"took",
"tool",
"toolbar",
"toolbox",
"toolkit",
"tools",
"tooth",
"top",
"topic",
"topics",
"tops",
"toronto",
"torture",
"toshiba",
"total",
"totally",
"totals",
"touch",
"touched",
"tough",
"tour",
"touring",
"tourism",
"tourist",
"tournament",
"tournaments",
"tours",
"toward",
"towards",
"tower",
"towers",
"town",
"towns",
"township",
"toxic",
"toy",
"toyota",
"toys",
"trace",
"track",
"trackback",
"trackbacks",
"tracked",
"tracker",
"tracking",
"tracks",
"tract",
"tractor",
"tracy",
"trade",
"trademark",
"trademarks",
"trader",
"trades",
"trading",
"tradition",
"traditional",
"traditions",
"traffic",
"tragedy",
"trail",
"trailer",
"trailers",
"trails",
"train",
"trained",
"trainer",
"trainers",
"training",
"trains",
"tramadol",
"trance",
"trans",
"transaction",
"transactions",
"transcript",
"transcription",
"transcripts",
"transexual",
"transexuales",
"transfer",
"transferred",
"transfers",
"transform",
"transformation",
"transit",
"transition",
"translate",
"translated",
"translation",
"translations",
"translator",
"transmission",
"transmit",
"transmitted",
"transparency",
"transparent",
"transport",
"transportation",
"transsexual",
"trap",
"trash",
"trauma",
"travel",
"traveler",
"travelers",
"traveling",
"traveller",
"travelling",
"travels",
"travesti",
"travis",
"tray",
"treasure",
"treasurer",
"treasures",
"treasury",
"treat",
"treated",
"treating",
"treatment",
"treatments",
"treaty",
"tree",
"trees",
"trek",
"trembl",
"tremendous",
"trend",
"trends",
"treo",
"tri",
"trial",
"trials",
"triangle",
"tribal",
"tribe",
"tribes",
"tribunal",
"tribune",
"tribute",
"trick",
"tricks",
"tried",
"tries",
"trigger",
"trim",
"trinidad",
"trinity",
"trio",
"trip",
"tripadvisor",
"triple",
"trips",
"triumph",
"trivia",
"troops",
"tropical",
"trouble",
"troubleshooting",
"trout",
"troy",
"truck",
"trucks",
"true",
"truly",
"trunk",
"trust",
"trusted",
"trustee",
"trustees",
"trusts",
"truth",
"try",
"trying",
"tsunami",
"tub",
"tube",
"tubes",
"tucson",
"tue",
"tuesday",
"tuition",
"tulsa",
"tumor",
"tune",
"tuner",
"tunes",
"tuning",
"tunisia",
"tunnel",
"turbo",
"turkey",
"turkish",
"turn",
"turned",
"turner",
"turning",
"turns",
"turtle",
"tutorial",
"tutorials",
"tvs",
"twelve",
"twenty",
"twice",
"twiki",
"twin",
"twins",
"twist",
"twisted",
"two",
"tyler",
"type",
"types",
"typical",
"typically",
"typing",
"uganda",
"ugly",
"ukraine",
"ultimate",
"ultimately",
"ultra",
"ultram",
"una",
"unable",
"unauthorized",
"unavailable",
"uncertainty",
"uncle",
"und",
"undefined",
"under",
"undergraduate",
"underground",
"underlying",
"understand",
"understanding",
"understood",
"undertake",
"undertaken",
"underwear",
"undo",
"une",
"unemployment",
"unexpected",
"unfortunately",
"uni",
"unified",
"uniform",
"union",
"unions",
"uniprotkb",
"unique",
"unit",
"united",
"units",
"unity",
"univ",
"universal",
"universe",
"universities",
"university",
"unix",
"unknown",
"unless",
"unlike",
"unlikely",
"unlimited",
"unlock",
"unnecessary",
"unsigned",
"unsubscribe",
"until",
"untitled",
"unto",
"unusual",
"unwrap",
"upc",
"upcoming",
"update",
"updated",
"updates",
"updating",
"upgrade",
"upgrades",
"upgrading",
"upload",
"uploaded",
"upon",
"upper",
"ups",
"upset",
"urban",
"urge",
"urgent",
"uri",
"url",
"urls",
"uruguay",
"urw",
"usa",
"usage",
"usb",
"usc",
"usd",
"usda",
"use",
"used",
"useful",
"user",
"username",
"users",
"uses",
"usgs",
"using",
"usps",
"usr",
"usual",
"usually",
"utah",
"utc",
"utilities",
"utility",
"utilization",
"utilize",
"utils",
"uzbekistan",
"vacancies",
"vacation",
"vacations",
"vaccine",
"vacuum",
"val",
"valentine",
"valid",
"validation",
"validity",
"valium",
"valley",
"valuable",
"valuation",
"value",
"valued",
"values",
"valve",
"valves",
"vampire",
"van",
"vancouver",
"vanilla",
"var",
"variable",
"variables",
"variance",
"variation",
"variations",
"varied",
"varies",
"varieties",
"variety",
"various",
"vary",
"varying",
"vast",
"vat",
"vatican",
"vault",
"vbulletin",
"vcr",
"vector",
"vegas",
"vegetable",
"vegetables",
"vegetarian",
"vegetation",
"vehicle",
"vehicles",
"velocity",
"velvet",
"vendor",
"vendors",
"venezuela",
"venice",
"venture",
"ventures",
"venue",
"venues",
"ver",
"verbal",
"verde",
"verification",
"verified",
"verify",
"verizon",
"vermont",
"vernon",
"verse",
"version",
"versions",
"versus",
"vertex",
"vertical",
"very",
"verzeichnis",
"vessel",
"vessels",
"veteran",
"veterans",
"veterinary",
"vhs",
"via",
"vic",
"vice",
"victim",
"victims",
"victor",
"victoria",
"victorian",
"victory",
"vid",
"video",
"videos",
"vids",
"vienna",
"vietnam",
"vietnamese",
"view",
"viewed",
"viewer",
"viewers",
"viewing",
"viewpicture",
"views",
"vii",
"viii",
"viking",
"villa",
"village",
"villages",
"villas",
"vincent",
"vintage",
"vinyl",
"violation",
"violations",
"violence",
"violent",
"violin",
"vip",
"viral",
"virgin",
"virginia",
"virtual",
"virtually",
"virtue",
"virus",
"viruses",
"visa",
"visibility",
"visible",
"vision",
"visit",
"visited",
"visiting",
"visitor",
"visitors",
"visits",
"vista",
"visual",
"vital",
"vitamin",
"vitamins",
"vocabulary",
"vocal",
"vocals",
"vocational",
"voice",
"voices",
"void",
"voip",
"vol",
"volkswagen",
"volleyball",
"volt",
"voltage",
"volume",
"volumes",
"voluntary",
"volunteer",
"volunteers",
"volvo",
"von",
"vote",
"voted",
"voters",
"votes",
"voting",
"voyeurweb",
"voyuer",
"vpn",
"vsnet",
"vulnerability",
"vulnerable",
"wage",
"wages",
"wagner",
"wagon",
"wait",
"waiting",
"waiver",
"wake",
"wal",
"wales",
"walk",
"walked",
"walker",
"walking",
"walks",
"wall",
"wallace",
"wallet",
"wallpaper",
"wallpapers",
"walls",
"walnut",
"walt",
"walter",
"wan",
"wanna",
"want",
"wanted",
"wanting",
"wants",
"war",
"warcraft",
"ward",
"ware",
"warehouse",
"warm",
"warming",
"warned",
"warner",
"warning",
"warnings",
"warrant",
"warranties",
"warranty",
"warren",
"warrior",
"warriors",
"wars",
"was",
"wash",
"washer",
"washing",
"washington",
"waste",
"watch",
"watched",
"watches",
"watching",
"water",
"waterproof",
"waters",
"watershed",
"watson",
"watt",
"watts",
"wav",
"wave",
"waves",
"wax",
"way",
"wayne",
"ways",
"weak",
"wealth",
"weapon",
"weapons",
"wear",
"wearing",
"weather",
"web",
"webcam",
"webcams",
"webcast",
"weblog",
"weblogs",
"webmaster",
"webmasters",
"webpage",
"webshots",
"website",
"websites",
"webster",
"wed",
"wedding",
"weddings",
"wednesday",
"weed",
"week",
"weekend",
"weekends",
"weekly",
"weeks",
"weight",
"weighted",
"weights",
"weird",
"welcome",
"welding",
"welfare",
"well",
"wellington",
"wellness",
"wells",
"welsh",
"wendy",
"went",
"were",
"wesley",
"west",
"western",
"westminster",
"wet",
"whale",
"what",
"whatever",
"whats",
"wheat",
"wheel",
"wheels",
"when",
"whenever",
"where",
"whereas",
"wherever",
"whether",
"which",
"while",
"whilst",
"white",
"who",
"whole",
"wholesale",
"whom",
"whose",
"why",
"wichita",
"wicked",
"wide",
"widely",
"wider",
"widescreen",
"widespread",
"width",
"wife",
"wifi",
"wiki",
"wikipedia",
"wild",
"wilderness",
"wildlife",
"wiley",
"will",
"william",
"williams",
"willing",
"willow",
"wilson",
"win",
"wind",
"window",
"windows",
"winds",
"windsor",
"wine",
"wines",
"wing",
"wings",
"winner",
"winners",
"winning",
"wins",
"winston",
"winter",
"wire",
"wired",
"wireless",
"wires",
"wiring",
"wisconsin",
"wisdom",
"wise",
"wish",
"wishes",
"wishing",
"wishlist",
"wit",
"witch",
"with",
"withdrawal",
"within",
"without",
"witness",
"witnesses",
"wives",
"wizard",
"wma",
"wolf",
"woman",
"women",
"womens",
"won",
"wonder",
"wonderful",
"wondering",
"wood",
"wooden",
"woods",
"wool",
"worcester",
"word",
"wordpress",
"words",
"work",
"worked",
"worker",
"workers",
"workflow",
"workforce",
"working",
"workout",
"workplace",
"works",
"workshop",
"workshops",
"workstation",
"world",
"worldcat",
"worlds",
"worldwide",
"worm",
"worn",
"worried",
"worry",
"worse",
"worship",
"worst",
"worth",
"worthy",
"would",
"wound",
"wow",
"wrap",
"wrapped",
"wrapping",
"wrestling",
"wright",
"wrist",
"write",
"writer",
"writers",
"writes",
"writing",
"writings",
"written",
"wrong",
"wrote",
"wto",
"www",
"wyoming",
"xanax",
"xbox",
"xerox",
"xhtml",
"xml",
"yacht",
"yahoo",
"yale",
"yamaha",
"yang",
"yard",
"yards",
"yarn",
"yea",
"yeah",
"year",
"yearly",
"years",
"yeast",
"yellow",
"yemen",
"yen",
"yes",
"yesterday",
"yet",
"yield",
"yields",
"yoga",
"york",
"yorkshire",
"you",
"young",
"younger",
"your",
"yours",
"yourself",
"youth",
"yrs",
"yugoslavia",
"yukon",
"zambia",
"zdnet",
"zealand",
"zen",
"zero",
"zimbabwe",
"zinc",
"zip",
"zoloft",
"zone",
"zones",
"zoning",
"zoo",
"zoom",
"zope",
"zshops",
"zum",
"zus",
]
|
lk-geimfari/elizabeth
|
mimesis/data/int/person.py
|
Python
|
mit
| 140,388
|
from agrc import logging
import unittest
import sys
import datetime
import os
import shutil
from mock import Mock, patch
class LoggerTests(unittest.TestCase):
    logTxt = 'test log text'
    erTxt = 'test error text'

    def setUp(self):
        self.logger = logging.Logger()

    def tearDown(self):
        del self.logger

    def test_init(self):
        # should get the name of the script and date
        self.assertEqual(self.logger.log, os.path.split(sys.argv[0])[1] + ' || ' +
                         datetime.datetime.now().strftime('%Y-%m-%d') + ' : ' +
                         datetime.datetime.now().strftime('%I:%M %p') + ' || None')

    def test_log(self):
        # should append the log message
        original_length = len(self.logger.log)
        self.logger.logMsg(self.logTxt)
        self.assertGreater(self.logger.log.find(self.logTxt), original_length)

    @patch('agrc.logging.Logger.logMsg')
    @patch('arcpy.GetMessages')
    def test_logGPMsg(self, GetMessages_mock, logMsg_mock):
        # should call get messages on arcpy
        self.logger.logGPMsg()
        self.assertTrue(GetMessages_mock.called)
        self.assertTrue(logMsg_mock.called)

    def test_writeToLogFile(self):
        if os.path.exists(self.logger.logFolder):
            shutil.rmtree(self.logger.logFolder)
        self.logger.writeLogToFile()
        # should create folder for script
        self.assertTrue(os.path.exists(self.logger.logFolder))

    def test_logError(self):
        self.logger.logMsg = Mock()
        self.logger.logError()
        self.assertTrue(self.logger.logMsg.called)
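
# Typical Logger usage implied by the tests above (a sketch, assuming the
# agrc package is installed; the method names come straight from the assertions):
#   logger = logging.Logger()
#   logger.logMsg('some message')   # append a message to the in-memory log
#   logger.logGPMsg()               # pull geoprocessing messages from arcpy
#   logger.writeLogToFile()         # persist the log under logger.logFolder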
if __name__ == '__main__':
    unittest.main()
|
ZachBeck/agrc.python
|
agrc/test/test_logging.py
|
Python
|
mit
| 1,666
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding model 'LaunchWindow'
        db.create_table(u'launch_window_launchwindow', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('cron_format', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal(u'launch_window', ['LaunchWindow'])

    def backwards(self, orm):
        # Deleting model 'LaunchWindow'
        db.delete_table(u'launch_window_launchwindow')

    models = {
        u'launch_window.launchwindow': {
            'Meta': {'object_name': 'LaunchWindow'},
            'cron_format': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['launch_window']
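
# Applying this migration with South (a sketch, assuming the app is in
# INSTALLED_APPS and South is configured):
#   python manage.py migrate launch_window 0001_initial
# Rolling it back runs backwards() via:
#   python manage.py migrate launch_window zero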
|
naphthalene/fabric-bolt
|
fabric_bolt/launch_window/migrations/0001_initial.py
|
Python
|
mit
| 1,427
|
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger("sikteeri.views")
from django.conf import settings
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from sikteeri.version import VERSION
def frontpage(request):
    if settings.MAINTENANCE_MESSAGE is None:
        if not request.user.is_authenticated():
            return redirect('membership.views.new_application')
        return render_to_response('frontpage.html',
                                  dict(title=_('Django and the jazz cigarette'),
                                       version=VERSION),
                                  context_instance=RequestContext(request))
    else:
        return render_to_response('maintenance_message.html',
                                  {"title": _('Under maintenance'),
                                   "maintenance_message": settings.MAINTENANCE_MESSAGE},
                                  context_instance=RequestContext(request))
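
# Note (not part of the upstream file): this view assumes settings.py defines
# MAINTENANCE_MESSAGE, set to None in normal operation or to a user-facing
# string to switch the whole frontpage into maintenance mode.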
|
AriMartti/sikteeri
|
sikteeri/views.py
|
Python
|
mit
| 1,066
|
# -*- coding: utf-8 -*-
#
# Nikola documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 22 17:43:37 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
from __future__ import unicode_literals
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
try:
    import sphinxcontrib.gist  # NOQA
    extensions = ['sphinxcontrib.gist']
except ImportError:
    extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Nikola'
copyright = '2012-2015, The Nikola Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.6.4'
# The full version, including alpha/beta/rc tags.
release = '7.6.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nikoladoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Nikola.tex', 'Nikola Documentation',
     'The Nikola Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'nikola', 'Nikola Documentation',
     ['The Nikola Contributors'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Nikola', 'Nikola Documentation',
     'The Nikola Contributors', 'Nikola', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
primary_domain = None
|
agustinhenze/nikola.debian
|
docs/sphinx/conf.py
|
Python
|
mit
| 8,386
|
"""
********************************************************************************
Learn Python the Hard Way Third Edition, by
Zed A. Shaw
ISBN: 978-0321884916
********************************************************************************
"""
import random
from urllib import urlopen
import sys
#debug = "DEBUG: "
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
    "class %%%(%%%):":
        "Make a class named %%% that is-a %%%.",
    "class %%%(object):\n\tdef __init__(self, ***)":
        "class %%% has-a __init__ that takes self and *** parameters.",
    "class %%%(object):\n\tdef ***(self,@@@)":
        "class %%% has-a function named *** that takes self and @@@ parameters.",
    "*** = %%%()":
        "Set *** to an instance of class %%%.",
    "***.***(@@@)":
        "From *** get the *** function, and call it with parameters self, @@@.",
    "***.*** = '***'":
        "From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
PHRASE_FIRST = False
if len(sys.argv) == 2 and sys.argv[1] == "english":
    PHRASE_FIRST = True
#print debug + "0"
# load up the words from the website
#for word in urlopen(WORD_URL).readlines():
# once downloaded, just open the file locally:
for word in open('words.txt').readlines():
    WORDS.append(word.strip())
    #print debug + word
def convert(snippet, phrase):
    class_names = [w.capitalize() for w in
                   random.sample(WORDS, snippet.count("%%%"))]
    other_names = random.sample(WORDS, snippet.count("***"))
    results = []
    param_names = []

    for i in range(0, snippet.count("@@@")):
        param_count = random.randint(1, 3)
        param_names.append(', '.join(random.sample(WORDS, param_count)))

    for sentence in snippet, phrase:
        result = sentence[:]

        # fake class names
        for word in class_names:
            result = result.replace("%%%", word, 1)

        # fake other names
        for word in other_names:
            result = result.replace("***", word, 1)

        # fake parameter lists
        for word in param_names:
            result = result.replace("@@@", word, 1)

        results.append(result)

    return results
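
# Illustrative call (hypothetical output, since the names are sampled at random):
#   convert("*** = %%%()", "Set *** to an instance of class %%%.")
#   => ['corn = Bridge()', 'Set corn to an instance of class Bridge.']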
# keep going until EOF
try:
    while True:
        snippets = PHRASES.keys()
        #print debug + "3"
        random.shuffle(snippets)

        for snippet in snippets:
            #print debug + "4"
            phrase = PHRASES[snippet]
            question, answer = convert(snippet, phrase)
            if PHRASE_FIRST:
                question, answer = answer, question

            print question
            raw_input("> ")
            print "ANSWER: %s\n\n" % answer
except EOFError:
    print "\nBye"
|
msnorm/projects
|
zspy2/ex41/ex41.py
|
Python
|
mit
| 2,749
|
import logging
from math import isclose
try:  # pragma: no cover
    import torch

    optim = torch.optim
except ImportError:  # pragma: no cover
    optim = None
def pinverse(t):
    """
    Computes the pseudo-inverse of a matrix using SVD.

    Parameters
    ----------
    t: torch.tensor
        The matrix whose inverse is to be calculated.

    Returns
    -------
    torch.tensor: Inverse of the matrix `t`.
    """
    u, s, v = t.svd()
    t_inv = v @ torch.diag(torch.where(s != 0, 1 / s, s)) @ u.t()
    return t_inv
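
# Quick sanity check (a sketch, not part of the upstream module): for an
# invertible matrix the pseudo-inverse coincides with the ordinary inverse.
#   t = torch.tensor([[2.0, 0.0], [0.0, 4.0]])
#   torch.allclose(pinverse(t), torch.inverse(t))  # True
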
def optimize(
    loss_fn, params={}, loss_args={}, opt="adam", max_iter=10000, exit_delta=1e-4
):
    """
    Generic function to optimize loss functions.

    Parameters
    ----------
    loss_fn: Function
        The function to optimize. It must return a torch.Tensor object.

    params: dict {str: torch.Tensor}
        The parameters which need to be optimized along with their initial values. The
        dictionary should be of the form: {variable name: initial value}

    loss_args: dict {str: torch.Tensor}
        Extra parameters which loss function needs to compute the loss.

    opt: str | Instance of torch.optim.Optimizer
        The optimizer to use. Should either be an instance of torch.optim or a str.
        When str is given initializes the optimizer with default parameters.

        If str the options are:
            1. Adadelta: Adadelta algorithm (Ref: https://arxiv.org/abs/1212.5701)
            2. Adagrad: Adagrad algorithm (Ref: http://jmlr.org/papers/v12/duchi11a.html)
            3. Adam: Adam algorithm (Ref: https://arxiv.org/abs/1412.6980)
            4. SparseAdam: Lazy version of Adam. Suitable for sparse tensors.
            5. Adamax: Adamax algorithm (variant of Adam based on infinity norm)
            6. ASGD: Averaged Stochastic Gradient Descent (Ref: https://dl.acm.org/citation.cfm?id=131098)
            7. LBFGS: L-BFGS Algorithm
            8. RMSprop: RMSprop Algorithm (Ref: https://arxiv.org/abs/1308.0850v5)
            9. Rprop: Resilient Backpropagation Algorithm
            10. SGD: Stochastic Gradient Descent.

    max_iter: int (default: 10000)
        The maximum number of iterations to run the optimization for.

    exit_delta: float
        The optimization exit criteria. When change in loss in an iteration is less than
        `exit_delta` the optimizer returns the values.

    Returns
    -------
    dict: The values that were given in params in the same format.

    Examples
    --------
    """
    # TODO: Add option to modify the optimizers.
    init_loss = float("inf")

    if isinstance(opt, str):
        opt_dict = {
            "adadelta": optim.Adadelta,
            "adagrad": optim.Adagrad,
            "adam": optim.Adam,
            "sparseadam": optim.SparseAdam,
            "adamax": optim.Adamax,
            "asgd": optim.ASGD,
            "lbfgs": optim.LBFGS,
            "rmsprop": optim.RMSprop,
            "rprop": optim.Rprop,
            "sgd": optim.SGD,
        }
        opt = opt_dict[opt.lower()](params.values())

    for t in range(max_iter):

        def closure():
            opt.zero_grad()
            loss = loss_fn(params, loss_args)
            loss.backward()
            return loss

        opt.step(closure=closure)

        if isclose(init_loss, closure().item(), abs_tol=exit_delta):
            logging.info(f"Converged after {t} iterations.")
            return params
        else:
            init_loss = closure().item()

    logging.info(
        f"Couldn't converge after {max_iter} iterations. Try increasing max_iter or change optimizer parameters"
    )
    return params
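
# Minimal usage sketch (not part of the upstream module): minimize x**2 with
# Adam. The variable names and starting point are illustrative assumptions.
if __name__ == "__main__":
    if optim is not None:
        x = torch.tensor([5.0], requires_grad=True)
        result = optimize(
            lambda params, loss_args: (params["x"] ** 2).sum(),
            params={"x": x},
            opt="adam",
        )
        print(result["x"])  # ends up near 0, the minimizer of x**2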
|
pgmpy/pgmpy
|
pgmpy/utils/optimizer.py
|
Python
|
mit
| 3,651
|
# author: Milan Kubik
|
apophys/ipaqe-provision-hosts
|
ipaqe_provision_hosts/backend/__init__.py
|
Python
|
mit
| 22
|