| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Bruno Cauet
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import os.path
from mock import Mock, patch, call
from tempfile import mkdtemp
from shutil import rmtree
import unittest
from test.helper import TestHelper
from beets.util import bytestring_path
from beetsplug.thumbnails import (ThumbnailsPlugin, NORMAL_DIR, LARGE_DIR,
write_metadata_im, write_metadata_pil,
PathlibURI, GioURI)
class ThumbnailsTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
def tearDown(self):
self.teardown_beets()
@patch('beetsplug.thumbnails.util')
def test_write_metadata_im(self, mock_util):
metadata = {"a": u"A", "b": u"B"}
write_metadata_im("foo", metadata)
try:
command = u"convert foo -set a A -set b B foo".split(' ')
mock_util.command_output.assert_called_once_with(command)
except AssertionError:
command = u"convert foo -set b B -set a A foo".split(' ')
mock_util.command_output.assert_called_once_with(command)
@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
@patch('beetsplug.thumbnails.os.stat')
def test_add_tags(self, mock_stat, _):
plugin = ThumbnailsPlugin()
plugin.write_metadata = Mock()
plugin.get_uri = Mock(side_effect={b"/path/to/cover":
"COVER_URI"}.__getitem__)
album = Mock(artpath=b"/path/to/cover")
mock_stat.return_value.st_mtime = 12345
plugin.add_tags(album, b"/path/to/thumbnail")
metadata = {"Thumb::URI": "COVER_URI",
"Thumb::MTime": u"12345"}
plugin.write_metadata.assert_called_once_with(b"/path/to/thumbnail",
metadata)
mock_stat.assert_called_once_with(album.artpath)
@patch('beetsplug.thumbnails.os')
@patch('beetsplug.thumbnails.ArtResizer')
@patch('beetsplug.thumbnails.get_im_version')
@patch('beetsplug.thumbnails.get_pil_version')
@patch('beetsplug.thumbnails.GioURI')
def test_check_local_ok(self, mock_giouri, mock_pil, mock_im,
mock_artresizer, mock_os):
# test local resizing capability
mock_artresizer.shared.local = False
plugin = ThumbnailsPlugin()
self.assertFalse(plugin._check_local_ok())
# test dirs creation
mock_artresizer.shared.local = True
def exists(path):
if path == NORMAL_DIR:
return False
if path == LARGE_DIR:
return True
raise ValueError(u"unexpected path {0!r}".format(path))
mock_os.path.exists = exists
plugin = ThumbnailsPlugin()
mock_os.makedirs.assert_called_once_with(NORMAL_DIR)
self.assertTrue(plugin._check_local_ok())
# test metadata writer function
mock_os.path.exists = lambda _: True
mock_pil.return_value = False
mock_im.return_value = False
with self.assertRaises(AssertionError):
ThumbnailsPlugin()
mock_pil.return_value = True
self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_pil)
mock_im.return_value = True
self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_im)
mock_pil.return_value = False
self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_im)
self.assertTrue(ThumbnailsPlugin()._check_local_ok())
# test URI getter function
giouri_inst = mock_giouri.return_value
giouri_inst.available = True
self.assertEqual(ThumbnailsPlugin().get_uri, giouri_inst.uri)
giouri_inst.available = False
self.assertEqual(ThumbnailsPlugin().get_uri.__self__.__class__,
PathlibURI)
@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
@patch('beetsplug.thumbnails.ArtResizer')
@patch('beetsplug.thumbnails.util')
@patch('beetsplug.thumbnails.os')
@patch('beetsplug.thumbnails.shutil')
def test_make_cover_thumbnail(self, mock_shutils, mock_os, mock_util,
mock_artresizer, _):
thumbnail_dir = os.path.normpath(b"/thumbnail/dir")
md5_file = os.path.join(thumbnail_dir, b"md5")
path_to_art = os.path.normpath(b"/path/to/art")
mock_os.path.join = os.path.join # don't mock that function
plugin = ThumbnailsPlugin()
plugin.add_tags = Mock()
album = Mock(artpath=path_to_art)
mock_util.syspath.side_effect = lambda x: x
plugin.thumbnail_file_name = Mock(return_value=b'md5')
mock_os.path.exists.return_value = False
def os_stat(target):
if target == md5_file:
return Mock(st_mtime=1)
elif target == path_to_art:
return Mock(st_mtime=2)
else:
raise ValueError(u"invalid target {0}".format(target))
mock_os.stat.side_effect = os_stat
plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)
mock_os.path.exists.assert_called_once_with(md5_file)
        mock_os.stat.assert_has_calls([call(md5_file), call(path_to_art)],
                                      any_order=True)
resize = mock_artresizer.shared.resize
resize.assert_called_once_with(12345, path_to_art, md5_file)
plugin.add_tags.assert_called_once_with(album, resize.return_value)
mock_shutils.move.assert_called_once_with(resize.return_value,
md5_file)
# now test with recent thumbnail & with force
mock_os.path.exists.return_value = True
plugin.force = False
resize.reset_mock()
def os_stat(target):
if target == md5_file:
return Mock(st_mtime=3)
elif target == path_to_art:
return Mock(st_mtime=2)
else:
raise ValueError(u"invalid target {0}".format(target))
mock_os.stat.side_effect = os_stat
plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)
self.assertEqual(resize.call_count, 0)
# and with force
plugin.config['force'] = True
plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)
resize.assert_called_once_with(12345, path_to_art, md5_file)
@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
def test_make_dolphin_cover_thumbnail(self, _):
plugin = ThumbnailsPlugin()
tmp = bytestring_path(mkdtemp())
album = Mock(path=tmp,
artpath=os.path.join(tmp, b"cover.jpg"))
plugin.make_dolphin_cover_thumbnail(album)
with open(os.path.join(tmp, b".directory"), "rb") as f:
self.assertEqual(
f.read().splitlines(),
[b"[Desktop Entry]", b"Icon=./cover.jpg"]
)
# not rewritten when it already exists (yup that's a big limitation)
album.artpath = b"/my/awesome/art.tiff"
plugin.make_dolphin_cover_thumbnail(album)
with open(os.path.join(tmp, b".directory"), "rb") as f:
self.assertEqual(
f.read().splitlines(),
[b"[Desktop Entry]", b"Icon=./cover.jpg"]
)
rmtree(tmp)
@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
@patch('beetsplug.thumbnails.ArtResizer')
def test_process_album(self, mock_artresizer, _):
get_size = mock_artresizer.shared.get_size
plugin = ThumbnailsPlugin()
make_cover = plugin.make_cover_thumbnail = Mock(return_value=True)
make_dolphin = plugin.make_dolphin_cover_thumbnail = Mock()
# no art
album = Mock(artpath=None)
plugin.process_album(album)
self.assertEqual(get_size.call_count, 0)
self.assertEqual(make_dolphin.call_count, 0)
# cannot get art size
album.artpath = b"/path/to/art"
get_size.return_value = None
plugin.process_album(album)
get_size.assert_called_once_with(b"/path/to/art")
self.assertEqual(make_cover.call_count, 0)
# dolphin tests
plugin.config['dolphin'] = False
plugin.process_album(album)
self.assertEqual(make_dolphin.call_count, 0)
plugin.config['dolphin'] = True
plugin.process_album(album)
make_dolphin.assert_called_once_with(album)
# small art
get_size.return_value = 200, 200
plugin.process_album(album)
make_cover.assert_called_once_with(album, 128, NORMAL_DIR)
# big art
make_cover.reset_mock()
get_size.return_value = 500, 500
plugin.process_album(album)
        make_cover.assert_has_calls([call(album, 128, NORMAL_DIR),
                                     call(album, 256, LARGE_DIR)],
                                    any_order=True)
@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
@patch('beetsplug.thumbnails.decargs')
    def test_invocations(self, mock_decargs, _):
plugin = ThumbnailsPlugin()
plugin.process_album = Mock()
album = Mock()
plugin.process_album.reset_mock()
lib = Mock()
album2 = Mock()
lib.albums.return_value = [album, album2]
plugin.process_query(lib, Mock(), None)
lib.albums.assert_called_once_with(mock_decargs.return_value)
        plugin.process_album.assert_has_calls([call(album), call(album2)],
                                              any_order=True)
@patch('beetsplug.thumbnails.BaseDirectory')
def test_thumbnail_file_name(self, mock_basedir):
plug = ThumbnailsPlugin()
plug.get_uri = Mock(return_value=u"file:///my/uri")
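        # The expected file name follows the freedesktop.org thumbnail spec:
        # the MD5 hex digest of the cover's URI, plus a ".png" extension.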
self.assertEqual(plug.thumbnail_file_name(b'idontcare'),
b"9488f5797fbe12ffb316d607dfd93d04.png")
def test_uri(self):
gio = GioURI()
if not gio.available:
self.skipTest(u"GIO library not found")
self.assertEqual(gio.uri(u"/foo"), u"file:///") # silent fail
self.assertEqual(gio.uri(b"/foo"), u"file:///foo")
self.assertEqual(gio.uri(b"/foo!"), u"file:///foo!")
self.assertEqual(
gio.uri(b'/music/\xec\x8b\xb8\xec\x9d\xb4'),
u'file:///music/%EC%8B%B8%EC%9D%B4')
class TestPathlibURI():
"""Test PathlibURI class"""
def test_uri(self):
test_uri = PathlibURI()
# test it won't break if we pass it bytes for a path
test_uri.uri(b'/')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| shamangeorge/beets | test/test_thumbnails.py | Python | mit | 11,414 |
import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
import sys
from collections.abc import Sequence
from contextlib import contextmanager
from errno import EINVAL, ENOENT
from operator import attrgetter
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
from urllib.parse import quote_from_bytes as urlquote_from_bytes
supports_symlinks = True
try:
import nt
except ImportError:
nt = None
else:
if sys.getwindowsversion()[:2] >= (6, 0):
from nt import _getfinalpathname
else:
supports_symlinks = False
_getfinalpathname = None
__all__ = [
"PurePath", "PurePosixPath", "PureWindowsPath",
"Path", "PosixPath", "WindowsPath",
]
#
# Internals
#
def _is_wildcard_pattern(pat):
# Whether this pattern needs actual matching using fnmatch, or can
# be looked up directly as a file.
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
"""A flavour implements a particular (platform-specific) set of path
semantics."""
def __init__(self):
self.join = self.sep.join
def parse_parts(self, parts):
parsed = []
sep = self.sep
altsep = self.altsep
drv = root = ''
it = reversed(parts)
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv, root, rel = self.splitroot(part)
if sep in rel:
for x in reversed(rel.split(sep)):
if x and x != '.':
parsed.append(sys.intern(x))
else:
if rel and rel != '.':
parsed.append(sys.intern(rel))
if drv or root:
if not drv:
# If no drive is present, try to find one in the previous
# parts. This makes the result of parsing e.g.
# ("C:", "/", "a") reasonably intuitive.
for part in it:
drv = self.splitroot(part)[0]
if drv:
break
break
if drv or root:
parsed.append(drv + root)
parsed.reverse()
return drv, root, parsed
def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
"""
Join the two paths represented by the respective
(drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
"""
if root2:
if not drv2 and drv:
return drv, root2, [drv + root2] + parts2[1:]
elif drv2:
if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
# Same drive => second path is relative to the first
return drv, root, parts + parts2[1:]
else:
# Second path is non-anchored (common case)
return drv, root, parts + parts2
return drv2, root2, parts2
class _WindowsFlavour(_Flavour):
# Reference for Windows paths can be found at
# http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx
sep = '\\'
altsep = '/'
has_drv = True
pathmod = ntpath
is_supported = (nt is not None)
drive_letters = (
set(chr(x) for x in range(ord('a'), ord('z') + 1)) |
set(chr(x) for x in range(ord('A'), ord('Z') + 1))
)
ext_namespace_prefix = '\\\\?\\'
reserved_names = (
{'CON', 'PRN', 'AUX', 'NUL'} |
{'COM%d' % i for i in range(1, 10)} |
{'LPT%d' % i for i in range(1, 10)}
)
# Interesting findings about extended paths:
# - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
# but '\\?\c:/a' is not
# - extended paths are always absolute; "relative" extended paths will
# fail.
def splitroot(self, part, sep=sep):
first = part[0:1]
second = part[1:2]
if (second == sep and first == sep):
# XXX extended paths should also disable the collapsing of "."
# components (according to MSDN docs).
prefix, part = self._split_extended_path(part)
first = part[0:1]
second = part[1:2]
else:
prefix = ''
third = part[2:3]
if (second == sep and first == sep and third != sep):
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvvv root
# \\machine\mountpoint\directory\etc\...
# directory ^^^^^^^^^^^^^^
index = part.find(sep, 2)
if index != -1:
index2 = part.find(sep, index + 1)
# a UNC path can't have two slashes in a row
# (after the initial two)
if index2 != index + 1:
if index2 == -1:
index2 = len(part)
if prefix:
return prefix + part[1:index2], sep, part[index2+1:]
else:
return part[:index2], sep, part[index2+1:]
drv = root = ''
if second == ':' and first in self.drive_letters:
drv = part[:2]
part = part[2:]
first = third
if first == sep:
root = first
part = part.lstrip(sep)
return prefix + drv, root, part
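    # A worked example of the UNC branch above (illustrative, not from the
    # original source): splitroot(r'\\machine\mountpoint\dir\etc') returns
    # ('\\\\machine\\mountpoint', '\\', 'dir\\etc') -- the host+share pair
    # acts as the drive and a single backslash is the root.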
def casefold(self, s):
return s.lower()
def casefold_parts(self, parts):
return [p.lower() for p in parts]
def resolve(self, path):
s = str(path)
if not s:
return os.getcwd()
if _getfinalpathname is not None:
return self._ext_to_normal(_getfinalpathname(s))
# Means fallback on absolute
return None
def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
prefix = ''
if s.startswith(ext_prefix):
prefix = s[:4]
s = s[4:]
if s.startswith('UNC\\'):
prefix += s[:3]
s = '\\' + s[3:]
return prefix, s
def _ext_to_normal(self, s):
# Turn back an extended path into a normal DOS-like path
return self._split_extended_path(s)[1]
def is_reserved(self, parts):
# NOTE: the rules for reserved names seem somewhat complicated
# (e.g. r"..\NUL" is reserved but not r"foo\NUL").
# We err on the side of caution and return True for paths which are
# not considered reserved by Windows.
if not parts:
return False
if parts[0].startswith('\\\\'):
# UNC paths are never reserved
return False
return parts[-1].partition('.')[0].upper() in self.reserved_names
def make_uri(self, path):
# Under Windows, file URIs use the UTF-8 encoding.
drive = path.drive
if len(drive) == 2 and drive[1] == ':':
# It's a path on a local drive => 'file:///c:/a/b'
rest = path.as_posix()[2:].lstrip('/')
return 'file:///%s/%s' % (
drive, urlquote_from_bytes(rest.encode('utf-8')))
else:
# It's a path on a network drive => 'file://host/share/a/b'
return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
class _PosixFlavour(_Flavour):
sep = '/'
altsep = ''
has_drv = False
pathmod = posixpath
is_supported = (os.name != 'nt')
def splitroot(self, part, sep=sep):
if part and part[0] == sep:
stripped_part = part.lstrip(sep)
# According to POSIX path resolution:
# http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
# "A pathname that begins with two successive slashes may be
# interpreted in an implementation-defined manner, although more
# than two leading slashes shall be treated as a single slash".
if len(part) - len(stripped_part) == 2:
return '', sep * 2, stripped_part
else:
return '', sep, stripped_part
else:
return '', '', part
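    # For example (illustrative, not from the original source):
    #   splitroot('//foo')  -> ('', '//', 'foo')   # exactly two slashes kept
    #   splitroot('///foo') -> ('', '/', 'foo')    # three or more collapse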
def casefold(self, s):
return s
def casefold_parts(self, parts):
return parts
def resolve(self, path):
sep = self.sep
accessor = path._accessor
seen = {}
def _resolve(path, rest):
if rest.startswith(sep):
path = ''
for name in rest.split(sep):
if not name or name == '.':
# current dir
continue
if name == '..':
# parent dir
path, _, _ = path.rpartition(sep)
continue
newpath = path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
raise RuntimeError("Symlink loop from %r" % newpath)
# Resolve the symbolic link
try:
target = accessor.readlink(newpath)
except OSError as e:
if e.errno != EINVAL:
raise
# Not a symlink
path = newpath
else:
seen[newpath] = None # not resolved symlink
path = _resolve(path, target)
seen[newpath] = path # resolved symlink
return path
# NOTE: according to POSIX, getcwd() cannot contain path components
# which are symlinks.
base = '' if path.is_absolute() else os.getcwd()
return _resolve(base, str(path)) or sep
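    # Sketch of the walk above: assuming /tmp/link is a symlink to /etc,
    # resolving '/tmp/link/passwd' visits 'tmp', then 'link' (readlink
    # succeeds, so _resolve recurses on the target '/etc'), then 'passwd',
    # yielding '/etc/passwd'; a link chain that revisits an unresolved entry
    # in `seen` raises RuntimeError as a symlink loop.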
def is_reserved(self, parts):
return False
def make_uri(self, path):
# We represent the path using the local filesystem encoding,
# for portability to other applications.
bpath = bytes(path)
return 'file://' + urlquote_from_bytes(bpath)
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
"""An accessor implements a particular (system-specific or not) way of
accessing paths on the filesystem."""
class _NormalAccessor(_Accessor):
def _wrap_strfunc(strfunc):
@functools.wraps(strfunc)
def wrapped(pathobj, *args):
return strfunc(str(pathobj), *args)
return staticmethod(wrapped)
def _wrap_binary_strfunc(strfunc):
@functools.wraps(strfunc)
def wrapped(pathobjA, pathobjB, *args):
return strfunc(str(pathobjA), str(pathobjB), *args)
return staticmethod(wrapped)
stat = _wrap_strfunc(os.stat)
lstat = _wrap_strfunc(os.lstat)
open = _wrap_strfunc(os.open)
listdir = _wrap_strfunc(os.listdir)
chmod = _wrap_strfunc(os.chmod)
if hasattr(os, "lchmod"):
lchmod = _wrap_strfunc(os.lchmod)
else:
def lchmod(self, pathobj, mode):
raise NotImplementedError("lchmod() not available on this system")
mkdir = _wrap_strfunc(os.mkdir)
unlink = _wrap_strfunc(os.unlink)
rmdir = _wrap_strfunc(os.rmdir)
rename = _wrap_binary_strfunc(os.rename)
replace = _wrap_binary_strfunc(os.replace)
if nt:
if supports_symlinks:
symlink = _wrap_binary_strfunc(os.symlink)
else:
def symlink(a, b, target_is_directory):
raise NotImplementedError("symlink() not available on this system")
else:
# Under POSIX, os.symlink() takes two args
@staticmethod
def symlink(a, b, target_is_directory):
return os.symlink(str(a), str(b))
utime = _wrap_strfunc(os.utime)
# Helper for resolve()
def readlink(self, path):
return os.readlink(path)
_normal_accessor = _NormalAccessor()
#
# Globbing helpers
#
@contextmanager
def _cached(func):
try:
func.__cached__
yield func
except AttributeError:
cache = {}
def wrapper(*args):
try:
return cache[args]
except KeyError:
value = cache[args] = func(*args)
return value
wrapper.__cached__ = True
try:
yield wrapper
finally:
cache.clear()
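# A hypothetical usage sketch of _cached: wrapping a listdir callable so
# that repeated reads of the same directory during one glob() traversal are
# served from memory, with the cache dropped when the walk finishes:
#
#     with _cached(os.listdir) as listdir:
#         listdir('/tmp')  # first call hits the filesystem
#         listdir('/tmp')  # second call returns the cached entry list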
def _make_selector(pattern_parts):
pat = pattern_parts[0]
child_parts = pattern_parts[1:]
if pat == '**':
cls = _RecursiveWildcardSelector
elif '**' in pat:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
elif _is_wildcard_pattern(pat):
cls = _WildcardSelector
else:
cls = _PreciseSelector
return cls(pat, child_parts)
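# For instance (a sketch): _make_selector(('**', '*.py')) builds a
# _RecursiveWildcardSelector whose successor is a _WildcardSelector, which
# is exactly the chain that rglob('*.py') relies on.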
if hasattr(functools, "lru_cache"):
_make_selector = functools.lru_cache()(_make_selector)
class _Selector:
"""A selector matches a specific glob pattern part against the children
of a given path."""
def __init__(self, child_parts):
self.child_parts = child_parts
if child_parts:
self.successor = _make_selector(child_parts)
else:
self.successor = _TerminatingSelector()
def select_from(self, parent_path):
"""Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself."""
path_cls = type(parent_path)
is_dir = path_cls.is_dir
exists = path_cls.exists
listdir = parent_path._accessor.listdir
return self._select_from(parent_path, is_dir, exists, listdir)
class _TerminatingSelector:
def _select_from(self, parent_path, is_dir, exists, listdir):
yield parent_path
class _PreciseSelector(_Selector):
def __init__(self, name, child_parts):
self.name = name
_Selector.__init__(self, child_parts)
def _select_from(self, parent_path, is_dir, exists, listdir):
if not is_dir(parent_path):
return
path = parent_path._make_child_relpath(self.name)
if exists(path):
for p in self.successor._select_from(path, is_dir, exists, listdir):
yield p
class _WildcardSelector(_Selector):
def __init__(self, pat, child_parts):
self.pat = re.compile(fnmatch.translate(pat))
_Selector.__init__(self, child_parts)
def _select_from(self, parent_path, is_dir, exists, listdir):
if not is_dir(parent_path):
return
cf = parent_path._flavour.casefold
for name in listdir(parent_path):
casefolded = cf(name)
if self.pat.match(casefolded):
path = parent_path._make_child_relpath(name)
for p in self.successor._select_from(path, is_dir, exists, listdir):
yield p
class _RecursiveWildcardSelector(_Selector):
def __init__(self, pat, child_parts):
_Selector.__init__(self, child_parts)
def _iterate_directories(self, parent_path, is_dir, listdir):
yield parent_path
for name in listdir(parent_path):
path = parent_path._make_child_relpath(name)
if is_dir(path):
for p in self._iterate_directories(path, is_dir, listdir):
yield p
def _select_from(self, parent_path, is_dir, exists, listdir):
if not is_dir(parent_path):
return
with _cached(listdir) as listdir:
yielded = set()
try:
successor_select = self.successor._select_from
for starting_point in self._iterate_directories(parent_path, is_dir, listdir):
for p in successor_select(starting_point, is_dir, exists, listdir):
if p not in yielded:
yield p
yielded.add(p)
finally:
yielded.clear()
#
# Public API
#
class _PathParents(Sequence):
"""This object provides sequence-like access to the logical ancestors
of a path. Don't try to construct it yourself."""
__slots__ = ('_pathcls', '_drv', '_root', '_parts')
def __init__(self, path):
# We don't store the instance to avoid reference cycles
self._pathcls = type(path)
self._drv = path._drv
self._root = path._root
self._parts = path._parts
def __len__(self):
if self._drv or self._root:
return len(self._parts) - 1
else:
return len(self._parts)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self):
raise IndexError(idx)
return self._pathcls._from_parsed_parts(self._drv, self._root,
self._parts[:-idx - 1])
def __repr__(self):
return "<{}.parents>".format(self._pathcls.__name__)
class PurePath(object):
"""PurePath represents a filesystem path and offers operations which
don't imply any actual filesystem I/O. Depending on your system,
instantiating a PurePath will return either a PurePosixPath or a
PureWindowsPath object. You can also instantiate either of these classes
directly, regardless of your system.
"""
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
def __new__(cls, *args):
"""Construct a PurePath from one or several strings and or existing
PurePath objects. The strings and path objects are combined so as
to yield a canonicalized path, which is incorporated into the
new PurePath object.
"""
if cls is PurePath:
cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
return cls._from_parts(args)
def __reduce__(self):
# Using the parts tuple helps share interned path parts
# when pickling related paths.
return (self.__class__, tuple(self._parts))
@classmethod
def _parse_args(cls, args):
# This is useful when you don't want to create an instance, just
# canonicalize some constructor arguments.
parts = []
for a in args:
if isinstance(a, PurePath):
parts += a._parts
elif isinstance(a, str):
# Force-cast str subclasses to str (issue #21127)
parts.append(str(a))
else:
raise TypeError(
"argument should be a path or str object, not %r"
% type(a))
return cls._flavour.parse_parts(parts)
@classmethod
def _from_parts(cls, args, init=True):
# We need to call _parse_args on the instance, so as to get the
# right flavour.
self = object.__new__(cls)
drv, root, parts = self._parse_args(args)
self._drv = drv
self._root = root
self._parts = parts
if init:
self._init()
return self
@classmethod
def _from_parsed_parts(cls, drv, root, parts, init=True):
self = object.__new__(cls)
self._drv = drv
self._root = root
self._parts = parts
if init:
self._init()
return self
@classmethod
def _format_parsed_parts(cls, drv, root, parts):
if drv or root:
return drv + root + cls._flavour.join(parts[1:])
else:
return cls._flavour.join(parts)
def _init(self):
        # Overridden in concrete Path
pass
def _make_child(self, args):
drv, root, parts = self._parse_args(args)
drv, root, parts = self._flavour.join_parsed_parts(
self._drv, self._root, self._parts, drv, root, parts)
return self._from_parsed_parts(drv, root, parts)
def __str__(self):
"""Return the string representation of the path, suitable for
passing to system calls."""
try:
return self._str
except AttributeError:
self._str = self._format_parsed_parts(self._drv, self._root,
self._parts) or '.'
return self._str
def as_posix(self):
"""Return the string representation of the path with forward (/)
slashes."""
f = self._flavour
return str(self).replace(f.sep, '/')
def __bytes__(self):
"""Return the bytes representation of the path. This is only
recommended to use under Unix."""
return os.fsencode(str(self))
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, self.as_posix())
def as_uri(self):
"""Return the path as a 'file' URI."""
if not self.is_absolute():
raise ValueError("relative path can't be expressed as a file URI")
return self._flavour.make_uri(self)
@property
def _cparts(self):
# Cached casefolded parts, for hashing and comparison
try:
return self._cached_cparts
except AttributeError:
self._cached_cparts = self._flavour.casefold_parts(self._parts)
return self._cached_cparts
def __eq__(self, other):
if not isinstance(other, PurePath):
return NotImplemented
return self._cparts == other._cparts and self._flavour is other._flavour
def __ne__(self, other):
return not self == other
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(tuple(self._cparts))
return self._hash
def __lt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts < other._cparts
def __le__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts <= other._cparts
def __gt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts > other._cparts
def __ge__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts >= other._cparts
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
@property
def anchor(self):
"""The concatenation of the drive and root, or ''."""
anchor = self._drv + self._root
return anchor
@property
def name(self):
"""The final path component, if any."""
parts = self._parts
if len(parts) == (1 if (self._drv or self._root) else 0):
return ''
return parts[-1]
@property
def suffix(self):
"""The final component's last suffix, if any."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[i:]
else:
return ''
@property
def suffixes(self):
"""A list of the final component's suffixes, if any."""
name = self.name
if name.endswith('.'):
return []
name = name.lstrip('.')
return ['.' + suffix for suffix in name.split('.')[1:]]
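    # e.g. (sketch): PurePosixPath('archive.tar.gz').suffixes is
    # ['.tar', '.gz'], while suffix alone is '.gz' and stem is 'archive.tar'.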
@property
def stem(self):
"""The final path component, minus its last suffix."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[:i]
else:
return name
def with_name(self, name):
"""Return a new path with the file name changed."""
if not self.name:
raise ValueError("%r has an empty name" % (self,))
drv, root, parts = self._flavour.parse_parts((name,))
if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
or drv or root or len(parts) != 1):
raise ValueError("Invalid name %r" % (name))
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def with_suffix(self, suffix):
"""Return a new path with the file suffix changed (or added, if none)."""
# XXX if suffix is None, should the current suffix be removed?
f = self._flavour
if f.sep in suffix or f.altsep and f.altsep in suffix:
raise ValueError("Invalid suffix %r" % (suffix))
if suffix and not suffix.startswith('.') or suffix == '.':
raise ValueError("Invalid suffix %r" % (suffix))
name = self.name
if not name:
raise ValueError("%r has an empty name" % (self,))
old_suffix = self.suffix
if not old_suffix:
name = name + suffix
else:
name = name[:-len(old_suffix)] + suffix
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def relative_to(self, *other):
"""Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
"""
# For the purpose of this method, drive and root are considered
# separate parts, i.e.:
# Path('c:/').relative_to('c:') gives Path('/')
# Path('c:/').relative_to('/') raise ValueError
if not other:
raise TypeError("need at least one argument")
parts = self._parts
drv = self._drv
root = self._root
if root:
abs_parts = [drv, root] + parts[1:]
else:
abs_parts = parts
to_drv, to_root, to_parts = self._parse_args(other)
if to_root:
to_abs_parts = [to_drv, to_root] + to_parts[1:]
else:
to_abs_parts = to_parts
n = len(to_abs_parts)
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
raise ValueError("{!r} does not start with {!r}"
.format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:])
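    # Illustrative behaviour (not from the original source):
    #   PurePosixPath('/a/b/c').relative_to('/a') -> PurePosixPath('b/c')
    #   PurePosixPath('/a/b/c').relative_to('/x') -> ValueError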
@property
def parts(self):
"""An object providing sequence-like access to the
components in the filesystem path."""
# We cache the tuple to avoid building a new one each time .parts
# is accessed. XXX is this necessary?
try:
return self._pparts
except AttributeError:
self._pparts = tuple(self._parts)
return self._pparts
def joinpath(self, *args):
"""Combine this path with one or several arguments, and return a
new path representing either a subpath (if all arguments are relative
paths) or a totally different path (if one of the arguments is
anchored).
"""
return self._make_child(args)
def __truediv__(self, key):
return self._make_child((key,))
def __rtruediv__(self, key):
return self._from_parts([key] + self._parts)
@property
def parent(self):
"""The logical parent of the path."""
drv = self._drv
root = self._root
parts = self._parts
if len(parts) == 1 and (drv or root):
return self
return self._from_parsed_parts(drv, root, parts[:-1])
@property
def parents(self):
"""A sequence of this path's logical parents."""
return _PathParents(self)
def is_absolute(self):
"""True if the path is absolute (has both a root and, if applicable,
a drive)."""
if not self._root:
return False
return not self._flavour.has_drv or bool(self._drv)
def is_reserved(self):
"""Return True if the path contains one of the special names reserved
by the system, if any."""
return self._flavour.is_reserved(self._parts)
def match(self, path_pattern):
"""
Return True if this path matches the given pattern.
"""
cf = self._flavour.casefold
path_pattern = cf(path_pattern)
drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
if not pat_parts:
raise ValueError("empty pattern")
if drv and drv != cf(self._drv):
return False
if root and root != cf(self._root):
return False
parts = self._cparts
if drv or root:
if len(pat_parts) != len(parts):
return False
pat_parts = pat_parts[1:]
elif len(pat_parts) > len(parts):
return False
for part, pat in zip(reversed(parts), reversed(pat_parts)):
if not fnmatch.fnmatchcase(part, pat):
return False
return True
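    # For example (sketch): PurePosixPath('a/b.py').match('*.py') is True,
    # since relative patterns match from the right, while
    # PurePosixPath('/a/b.py').match('/*.py') is False because an anchored
    # pattern must match the whole path.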
class PurePosixPath(PurePath):
_flavour = _posix_flavour
__slots__ = ()
class PureWindowsPath(PurePath):
_flavour = _windows_flavour
__slots__ = ()
# Filesystem-accessing classes
class Path(PurePath):
__slots__ = (
'_accessor',
'_closed',
)
def __new__(cls, *args, **kwargs):
if cls is Path:
cls = WindowsPath if os.name == 'nt' else PosixPath
self = cls._from_parts(args, init=False)
if not self._flavour.is_supported:
raise NotImplementedError("cannot instantiate %r on your system"
% (cls.__name__,))
self._init()
return self
def _init(self,
# Private non-constructor arguments
template=None,
):
self._closed = False
if template is not None:
self._accessor = template._accessor
else:
self._accessor = _normal_accessor
def _make_child_relpath(self, part):
# This is an optimization used for dir walking. `part` must be
# a single part relative to this path.
parts = self._parts + [part]
return self._from_parsed_parts(self._drv, self._root, parts)
def __enter__(self):
if self._closed:
self._raise_closed()
return self
def __exit__(self, t, v, tb):
self._closed = True
def _raise_closed(self):
raise ValueError("I/O operation on closed path")
def _opener(self, name, flags, mode=0o666):
# A stub for the opener argument to built-in open()
return self._accessor.open(self, flags, mode)
def _raw_open(self, flags, mode=0o777):
"""
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
if self._closed:
self._raise_closed()
return self._accessor.open(self, flags, mode)
# Public API
@classmethod
def cwd(cls):
"""Return a new path pointing to the current working directory
(as returned by os.getcwd()).
"""
return cls(os.getcwd())
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
if self._closed:
self._raise_closed()
for name in self._accessor.listdir(self):
if name in {'.', '..'}:
# Yielding a path object for these makes little sense
continue
yield self._make_child_relpath(name)
if self._closed:
self._raise_closed()
def glob(self, pattern):
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given pattern.
"""
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(tuple(pattern_parts))
for p in selector.select_from(self):
yield p
def rglob(self, pattern):
"""Recursively yield all existing files (of any kind, including
directories) matching the given pattern, anywhere in this subtree.
"""
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(("**",) + tuple(pattern_parts))
for p in selector.select_from(self):
yield p
def absolute(self):
"""Return an absolute version of this path. This function works
even if the path doesn't point to anything.
        No normalization is done, i.e. '.' and '..' components are kept as-is.
Use resolve() to get the canonical path to a file.
"""
# XXX untested yet!
if self._closed:
self._raise_closed()
if self.is_absolute():
return self
# FIXME this must defer to the specific flavour (and, under Windows,
# use nt._getfullpathname())
obj = self._from_parts([os.getcwd()] + self._parts, init=False)
obj._init(template=self)
return obj
def resolve(self):
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
if self._closed:
self._raise_closed()
s = self._flavour.resolve(self)
if s is None:
# No symlink resolution => for consistency, raise an error if
# the path doesn't exist or is forbidden
self.stat()
s = str(self.absolute())
# Now we have no symlinks in the path, it's safe to normalize it.
normed = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj
def stat(self):
"""
Return the result of the stat() system call on this path, like
os.stat() does.
"""
return self._accessor.stat(self)
def owner(self):
"""
Return the login name of the file owner.
"""
import pwd
return pwd.getpwuid(self.stat().st_uid).pw_name
def group(self):
"""
Return the group name of the file gid.
"""
import grp
return grp.getgrgid(self.stat().st_gid).gr_name
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
if self._closed:
self._raise_closed()
return io.open(str(self), mode, buffering, encoding, errors, newline,
opener=self._opener)
def touch(self, mode=0o666, exist_ok=True):
"""
Create this file with the given access mode, if it doesn't exist.
"""
if self._closed:
self._raise_closed()
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
self._accessor.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = self._raw_open(flags, mode)
os.close(fd)
def mkdir(self, mode=0o777, parents=False):
if self._closed:
self._raise_closed()
if not parents:
self._accessor.mkdir(self, mode)
else:
try:
self._accessor.mkdir(self, mode)
except OSError as e:
if e.errno != ENOENT:
raise
self.parent.mkdir(parents=True)
self._accessor.mkdir(self, mode)
def chmod(self, mode):
"""
Change the permissions of the path, like os.chmod().
"""
if self._closed:
self._raise_closed()
self._accessor.chmod(self, mode)
def lchmod(self, mode):
"""
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
"""
if self._closed:
self._raise_closed()
self._accessor.lchmod(self, mode)
def unlink(self):
"""
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
if self._closed:
self._raise_closed()
self._accessor.unlink(self)
def rmdir(self):
"""
Remove this directory. The directory must be empty.
"""
if self._closed:
self._raise_closed()
self._accessor.rmdir(self)
def lstat(self):
"""
Like stat(), except if the path points to a symlink, the symlink's
status information is returned, rather than its target's.
"""
if self._closed:
self._raise_closed()
return self._accessor.lstat(self)
def rename(self, target):
"""
Rename this path to the given path.
"""
if self._closed:
self._raise_closed()
self._accessor.rename(self, target)
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if self._closed:
self._raise_closed()
self._accessor.replace(self, target)
def symlink_to(self, target, target_is_directory=False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
"""
if self._closed:
self._raise_closed()
self._accessor.symlink(target, self, target_is_directory)
# Convenience functions for querying the stat results
def exists(self):
"""
Whether this path exists.
"""
try:
self.stat()
except OSError as e:
if e.errno != ENOENT:
raise
return False
return True
def is_dir(self):
"""
Whether this path is a directory.
"""
try:
return S_ISDIR(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_file(self):
"""
Whether this path is a regular file (also True for symlinks pointing
to regular files).
"""
try:
return S_ISREG(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_symlink(self):
"""
Whether this path is a symbolic link.
"""
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist
return False
def is_block_device(self):
"""
Whether this path is a block device.
"""
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_char_device(self):
"""
Whether this path is a character device.
"""
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_fifo(self):
"""
Whether this path is a FIFO.
"""
try:
return S_ISFIFO(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_socket(self):
"""
Whether this path is a socket.
"""
try:
return S_ISSOCK(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
class PosixPath(Path, PurePosixPath):
__slots__ = ()
class WindowsPath(Path, PureWindowsPath):
__slots__ = ()
| gautamMalu/rootfs_xen_arndale | usr/lib/python3.4/pathlib.py | Python | gpl-2.0 | 41,820 |
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
import array
from collections import defaultdict
from collections.abc import Mapping
from functools import partial
import numbers
from operator import itemgetter
import re
import unicodedata
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin, _OneToOneFeatureMixin
from ..preprocessing import normalize
from ._hash import FeatureHasher
from ._stop_words import ENGLISH_STOP_WORDS
from ..utils.validation import check_is_fitted, check_array, FLOAT_DTYPES, check_scalar
from ..utils.deprecation import deprecated
from ..utils import _IS_32BIT
from ..utils.fixes import _astype_copy_false
from ..exceptions import NotFittedError
__all__ = [
"HashingVectorizer",
"CountVectorizer",
"ENGLISH_STOP_WORDS",
"TfidfTransformer",
"TfidfVectorizer",
"strip_accents_ascii",
"strip_accents_unicode",
"strip_tags",
]
def _preprocess(doc, accent_function=None, lower=False):
"""Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
doc: str
The string to preprocess
accent_function: callable, default=None
Function for handling accented characters. Common strategies include
normalizing and removing.
lower: bool, default=False
Whether to use str.lower to lowercase all of the text
Returns
-------
doc: str
preprocessed string
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
def _analyze(
doc,
analyzer=None,
tokenizer=None,
ngrams=None,
preprocessor=None,
decoder=None,
stop_words=None,
):
"""Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
If analyzer is used, only the decoder argument is used, as the analyzer is
intended to replace the preprocessor, tokenizer, and ngrams steps.
Parameters
----------
analyzer: callable, default=None
tokenizer: callable, default=None
ngrams: callable, default=None
preprocessor: callable, default=None
decoder: callable, default=None
stop_words: list, default=None
Returns
-------
ngrams: list
A sequence of tokens, possibly with pairs, triples, etc.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
Parameters
----------
s : string
The string to strip
See Also
--------
strip_accents_ascii : Remove accentuated char for any unicode symbol that
has a direct ASCII equivalent.
"""
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize("NFKD", s)
return "".join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
Parameters
----------
s : str
The string to strip
See Also
--------
strip_accents_unicode : Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize("NFKD", s)
return nkfd_form.encode("ASCII", "ignore").decode("ASCII")
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
Parameters
----------
s : str
The string to strip
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, str):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class _VectorizerMixin:
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols.
The decoding strategy depends on the vectorizer parameters.
Parameters
----------
doc : bytes or str
The string to decode.
Returns
-------
doc: str
A string of unicode symbols.
"""
if self.input == "filename":
with open(doc, "rb") as fh:
doc = fh.read()
elif self.input == "file":
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError(
"np.nan is an invalid document, expected byte or unicode string."
)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i : i + n]))
return tokens
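    # Sketch of the output shape: with ngram_range=(1, 2), the tokens
    # ['please', 'split', 'this'] become
    # ['please', 'split', 'this', 'please split', 'split this'].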
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
ngrams_append(text_document[i : i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space."""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = " " + w + " "
w_len = len(w)
for n in range(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset : offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset : offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
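    # Sketch: with ngram_range=(2, 2), the document "hi" is padded to " hi "
    # and yields [' h', 'hi', 'i '] -- every n-gram stays inside the
    # (space-padded) word boundary.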
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization.
Returns
-------
preprocessor: callable
A function to preprocess the text before tokenization.
"""
if self.preprocessor is not None:
return self.preprocessor
# accent stripping
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == "ascii":
strip_accents = strip_accents_ascii
elif self.strip_accents == "unicode":
strip_accents = strip_accents_unicode
else:
raise ValueError(
'Invalid value for "strip_accents": %s' % self.strip_accents
)
return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens.
Returns
-------
tokenizer: callable
A function to split a string into a sequence of tokens.
"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
if token_pattern.groups > 1:
raise ValueError(
"More than 1 capturing group in token pattern. Only a single "
"group should be captured."
)
return token_pattern.findall
def get_stop_words(self):
"""Build or fetch the effective stop words list.
Returns
-------
stop_words: list or None
A list of stop words.
"""
return _check_stop_list(self.stop_words)
def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
"""Check if stop words are consistent
Returns
-------
is_consistent : True if stop words are consistent with the preprocessor
and tokenizer, False if they are not, None if the check
was previously performed, "error" if it could not be
performed (e.g. because of the use of a custom
preprocessor / tokenizer)
"""
if id(self.stop_words) == getattr(self, "_stop_words_id", None):
            # Stop words were previously validated
return None
# NB: stop_words is validated, unlike self.stop_words
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn(
"Your stop_words may be inconsistent with "
"your preprocessing. Tokenizing the stop "
"words generated tokens %r not in "
"stop_words."
% sorted(inconsistent)
)
return not inconsistent
except Exception:
# Failed to check stop words consistency (e.g. because a custom
# preprocessor or tokenizer was used)
self._stop_words_id = id(self.stop_words)
return "error"
def build_analyzer(self):
"""Return a callable to process input data.
        The callable handles preprocessing, tokenization, and
        n-grams generation.
Returns
-------
analyzer: callable
A function to handle preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
preprocess = self.build_preprocessor()
if self.analyzer == "char":
return partial(
_analyze,
ngrams=self._char_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "char_wb":
return partial(
_analyze,
ngrams=self._char_wb_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "word":
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self._check_stop_words_consistency(stop_words, preprocess, tokenize)
return partial(
_analyze,
ngrams=self._word_ngrams,
tokenizer=tokenize,
preprocessor=preprocess,
decoder=self.decode,
stop_words=stop_words,
)
else:
raise ValueError(
"%s is not a valid tokenization scheme/analyzer" % self.analyzer
)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(vocabulary.values())
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in range(len(vocabulary)):
if i not in indices:
msg = "Vocabulary of size %d doesn't contain index %d." % (
len(vocabulary),
i,
)
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fitted)"""
if not hasattr(self, "vocabulary_"):
self._validate_vocabulary()
if not self.fixed_vocabulary_:
raise NotFittedError("Vocabulary not fitted or provided")
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
def _validate_params(self):
"""Check validity of ngram_range parameter"""
        min_n, max_n = self.ngram_range
        if min_n > max_n:
raise ValueError(
"Invalid value for ngram_range=%s "
"lower boundary larger than the upper boundary."
% str(self.ngram_range)
)
def _warn_for_unused_params(self):
if self.tokenizer is not None and self.token_pattern is not None:
warnings.warn(
"The parameter 'token_pattern' will not be used"
" since 'tokenizer' is not None'"
)
if self.preprocessor is not None and callable(self.analyzer):
warnings.warn(
"The parameter 'preprocessor' will not be used"
" since 'analyzer' is callable'"
)
if (
self.ngram_range != (1, 1)
and self.ngram_range is not None
and callable(self.analyzer)
):
warnings.warn(
"The parameter 'ngram_range' will not be used"
" since 'analyzer' is callable'"
)
if self.analyzer != "word" or callable(self.analyzer):
if self.stop_words is not None:
warnings.warn(
"The parameter 'stop_words' will not be used"
" since 'analyzer' != 'word'"
)
if (
self.token_pattern is not None
and self.token_pattern != r"(?u)\b\w\w+\b"
):
warnings.warn(
"The parameter 'token_pattern' will not be used"
" since 'analyzer' != 'word'"
)
if self.tokenizer is not None:
warnings.warn(
"The parameter 'tokenizer' will not be used"
" since 'analyzer' != 'word'"
)
class HashingVectorizer(TransformerMixin, _VectorizerMixin, BaseEstimator):
r"""Convert a collection of text documents to a matrix of token occurrences.
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory.
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters.
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'}, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer`` is not callable.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
n_features : int, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
binary : bool, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
norm : {'l1', 'l2'}, default='l2'
Norm used to normalize term vectors. None for no normalization.
alternate_sign : bool, default=True
        When True, an alternating sign is added to the features so as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
.. versionadded:: 0.19
dtype : type, default=np.float64
Type of the matrix returned by fit_transform() or transform().
See Also
--------
CountVectorizer : Convert a collection of text documents to a matrix of
token counts.
TfidfVectorizer : Convert a collection of raw documents to a matrix of
TF-IDF features.
Examples
--------
>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = HashingVectorizer(n_features=2**4)
>>> X = vectorizer.fit_transform(corpus)
>>> print(X.shape)
(4, 16)
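    >>> # Hedged sketch: the vectorizer is stateless, so ``transform`` works
    >>> # on new documents without fitting (same parameters assumed as above).
    >>> Y = vectorizer.transform(['A completely new document.'])
    >>> print(Y.shape)
    (1, 16)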
"""
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
analyzer="word",
n_features=(2 ** 20),
binary=False,
norm="l2",
alternate_sign=True,
dtype=np.float64,
):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.dtype = dtype
def partial_fit(self, X, y=None):
"""No-op: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
Parameters
----------
        X : ndarray of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
return self
def fit(self, X, y=None):
"""No-op: this transformer is stateless.
Parameters
----------
        X : ndarray of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
# triggers a parameter validation
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._warn_for_unused_params()
self._validate_params()
self._get_hasher().fit(X, y=y)
return self
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._validate_params()
analyzer = self.build_analyzer()
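        # Stream per-document token sequences into the stateless
        # FeatureHasher; no vocabulary is built or consulted.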
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
def fit_transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
return self.fit(X, y).transform(X)
def _get_hasher(self):
return FeatureHasher(
n_features=self.n_features,
input_type="string",
dtype=self.dtype,
alternate_sign=self.alternate_sign,
)
def _more_tags(self):
return {"X_types": ["string"]}
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
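        # CSR stores the column index of every nonzero value, so counting
        # those indices yields the per-feature document frequency.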
return np.bincount(X.indices, minlength=X.shape[1])
else:
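        # Otherwise assume CSC: the indptr boundaries give the number of
        # stored values in each column.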
return np.diff(X.indptr)
class CountVectorizer(_VectorizerMixin, BaseEstimator):
r"""Convert a collection of text documents to a matrix of token counts.
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
    If you do not provide an a priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'}, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (strip_accents and lowercase) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra-corpus document frequency of terms.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
        word n-grams or char n-grams to be extracted. All values of n such
        that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
Only applies if ``analyzer`` is not callable.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
            Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data is
first read from the file and then passed to the given callable
analyzer.
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : bool, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, default=np.int64
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See Also
--------
    HashingVectorizer : Convert a collection of text documents to a
        matrix of token occurrences.
TfidfVectorizer : Convert a collection of raw documents to a matrix
of TF-IDF features.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
Examples
--------
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> vectorizer.get_feature_names_out()
array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
'this'], ...)
>>> print(X.toarray())
[[0 1 1 1 0 0 1 0 1]
[0 2 0 1 0 1 1 0 1]
[1 0 0 1 1 0 1 1 1]
[0 1 1 1 0 0 1 0 1]]
>>> vectorizer2 = CountVectorizer(analyzer='word', ngram_range=(2, 2))
>>> X2 = vectorizer2.fit_transform(corpus)
>>> vectorizer2.get_feature_names_out()
array(['and this', 'document is', 'first document', 'is the', 'is this',
'second document', 'the first', 'the second', 'the third', 'third one',
'this document', 'this is', 'this the'], ...)
>>> print(X2.toarray())
[[0 0 1 1 0 0 1 0 0 0 0 1 0]
[0 1 0 1 0 1 0 1 0 0 1 0 0]
[1 0 0 1 0 0 0 0 1 1 0 1 0]
[0 0 1 0 1 0 1 0 0 0 0 0 1]]
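    >>> # Hedged sketch: ``min_df=2`` drops terms appearing in fewer than two
    >>> # documents (output values assume the corpus above).
    >>> vectorizer3 = CountVectorizer(min_df=2)
    >>> X3 = vectorizer3.fit_transform(corpus)
    >>> vectorizer3.get_feature_names_out()
    array(['document', 'first', 'is', 'the', 'this'], ...)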
"""
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
analyzer="word",
max_df=1.0,
min_df=1,
max_features=None,
vocabulary=None,
binary=False,
dtype=np.int64,
):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
self.max_features = max_features
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(vocabulary.items())
map_index = np.empty(len(sorted_features), dtype=X.indices.dtype)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode="clip")
return X
def _limit_features(self, X, vocabulary, high=None, low=None, limit=None):
"""Remove too rare or too common features.
        Prune features that are nonzero in more documents than ``high`` or
        in fewer documents than ``low``, modifying the vocabulary and
        restricting it to at most the ``limit`` most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
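            # Among features passing the df mask, keep the ``limit`` with the
            # highest total term count across the corpus.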
tfs = np.asarray(X.sum(axis=0)).ravel()
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(vocabulary.items()):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError(
"After pruning, no terms remain. Try a lower min_df or a higher max_df."
)
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
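            # Each unseen term is thereby assigned the next integer index
            # (the current vocabulary size) on first access.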
analyze = self.build_analyzer()
j_indices = []
indptr = []
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError(
"empty vocabulary; perhaps the documents only contain stop words"
)
if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1
if _IS_32BIT:
raise ValueError(
(
"sparse CSR array has {} non-zero "
"elements and requires 64 bit indexing, "
"which is unsupported with 32 bit Python."
).format(indptr[-1])
)
indices_dtype = np.int64
else:
indices_dtype = np.int32
j_indices = np.asarray(j_indices, dtype=indices_dtype)
indptr = np.asarray(indptr, dtype=indices_dtype)
values = np.frombuffer(values, dtype=np.intc)
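        # Assemble the CSR matrix directly: values, j_indices and indptr
        # follow the standard CSR layout (one indptr slice per document).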
X = sp.csr_matrix(
(values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype,
)
X.sort_indices()
return vocabulary, X
def _validate_params(self):
"""Validation of min_df, max_df and max_features"""
super()._validate_params()
if self.max_features is not None:
check_scalar(self.max_features, "max_features", numbers.Integral, min_val=0)
if isinstance(self.min_df, numbers.Integral):
check_scalar(self.min_df, "min_df", numbers.Integral, min_val=0)
else:
check_scalar(self.min_df, "min_df", numbers.Real, min_val=0.0, max_val=1.0)
if isinstance(self.max_df, numbers.Integral):
check_scalar(self.max_df, "max_df", numbers.Integral, min_val=0)
else:
check_scalar(self.max_df, "max_df", numbers.Real, min_val=0.0, max_val=1.0)
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
self : object
Fitted vectorizer.
"""
self._warn_for_unused_params()
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
        X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._validate_params()
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
if self.fixed_vocabulary_ and self.lowercase:
for term in self.vocabulary:
if any(map(str.isupper, term)):
warnings.warn(
"Upper case characters found in"
" vocabulary while 'lowercase'"
" is True. These entries will not"
" be matched with any documents"
)
break
vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
n_doc = X.shape[0]
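            # Fractional df thresholds are proportions of the corpus; convert
            # them to absolute document counts.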
max_doc_count = (
max_df if isinstance(max_df, numbers.Integral) else max_df * n_doc
)
min_doc_count = (
min_df if isinstance(min_df, numbers.Integral) else min_df * n_doc
)
if max_doc_count < min_doc_count:
raise ValueError("max_df corresponds to < documents than min_df")
if max_features is not None:
X = self._sort_features(X, vocabulary)
X, self.stop_words_ = self._limit_features(
X, vocabulary, max_doc_count, min_doc_count, max_features
)
if max_features is None:
X = self._sort_features(X, vocabulary)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document-term matrix.
Returns
-------
        X_inv : list of arrays
            List of length ``n_samples``; each array holds the terms with
            nonzero entries in the corresponding document.
"""
self._check_vocabulary()
# We need CSR format for fast row manipulations.
X = check_array(X, accept_sparse="csr")
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
if sp.issparse(X):
return [
inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)
]
else:
return [
inverse_vocabulary[np.flatnonzero(X[i, :])].ravel()
for i in range(n_samples)
]
@deprecated(
"get_feature_names is deprecated in 1.0 and will be removed "
"in 1.2. Please use get_feature_names_out instead."
)
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name.
Returns
-------
feature_names : list
A list of feature names.
"""
self._check_vocabulary()
return [t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))]
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
self._check_vocabulary()
return np.asarray(
[t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))],
dtype=object,
)
def _more_tags(self):
return {"X_types": ["string"]}
def _make_int_array():
    """Construct an array.array of a type suitable for scipy.sparse indices."""
    return array.array("i")
class TfidfTransformer(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Transform a count matrix to a normalized tf or tf-idf representation.
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf for a term t of a document d
in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is
computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where
n is the total number of documents in the document set and df(t) is the
document frequency of t; the document frequency is the number of documents
in the document set that contain the term t. The effect of adding "1" to
the idf in the equation above is that terms with zero idf, i.e., terms
that occur in all documents in a training set, will not be entirely
ignored.
(Note that the idf formula above differs from the standard textbook
notation that defines the idf as
idf(t) = log [ n / (df(t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : {'l1', 'l2'}, default='l2'
Each output row will have unit norm, either:
- 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
- 'l1': Sum of absolute values of vector elements is 1.
See :func:`preprocessing.normalize`.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array of shape (n_features)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
.. versionadded:: 0.20
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 1.0
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
TfidfVectorizer : Convert a collection of raw documents to a matrix of
TF-IDF features.
HashingVectorizer : Convert a collection of text documents to a matrix
of token occurrences.
References
----------
.. [Yates2011] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.
.. [MRS2008] C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> from sklearn.pipeline import Pipeline
>>> corpus = ['this is the first document',
... 'this document is the second document',
... 'and this is the third one',
... 'is this the first document']
>>> vocabulary = ['this', 'document', 'first', 'is', 'second', 'the',
... 'and', 'one']
>>> pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)),
... ('tfid', TfidfTransformer())]).fit(corpus)
>>> pipe['count'].transform(corpus).toarray()
array([[1, 1, 1, 1, 0, 1, 0, 0],
[1, 2, 0, 1, 1, 1, 0, 0],
[1, 0, 0, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 1, 0, 0]])
>>> pipe['tfid'].idf_
array([1. , 1.22314355, 1.51082562, 1. , 1.91629073,
1. , 1.91629073, 1.91629073])
>>> pipe.transform(corpus).shape
(4, 8)
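    >>> # Hedged check of the smoothed idf formula for 'document', which
    >>> # occurs in 3 of the 4 documents: log((1 + n) / (1 + df)) + 1.
    >>> import numpy as np
    >>> round(float(np.log((1 + 4) / (1 + 3)) + 1), 8)
    1.22314355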
"""
def __init__(self, *, norm="l2", use_idf=True, smooth_idf=True, sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights).
Parameters
----------
        X : sparse matrix of shape (n_samples, n_features)
A matrix of term/token counts.
y : None
This parameter is not needed to compute tf-idf.
Returns
-------
self : object
Fitted transformer.
"""
# large sparse data is not supported for 32bit platforms because
# _document_frequency uses np.bincount which works on arrays of
# dtype NPY_INTP which is int32 for 32bit platforms. See #20923
X = self._validate_data(
X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT
)
if not sp.issparse(X):
X = sp.csr_matrix(X)
dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
df = df.astype(dtype, **_astype_copy_false(df))
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(n_samples / df) + 1
self._idf_diag = sp.diags(
idf,
offsets=0,
shape=(n_features, n_features),
format="csr",
dtype=dtype,
)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation.
Parameters
----------
X : sparse matrix of (n_samples, n_features)
A matrix of term/token counts.
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix of shape (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
X = self._validate_data(
X, accept_sparse="csr", dtype=FLOAT_DTYPES, copy=copy, reset=False
)
if not sp.issparse(X):
X = sp.csr_matrix(X, dtype=np.float64)
if self.sublinear_tf:
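            # In-place on the stored values: tf -> log(tf), then add 1,
            # yielding the sublinear scaling 1 + log(tf).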
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
# idf_ being a property, the automatic attributes detection
# does not work as usual and we need to specify the attribute
# name:
check_is_fitted(self, attributes=["idf_"], msg="idf vector is not fitted")
            # in-place ``*=`` is not supported for scipy sparse matrices; reassign
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
"""Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
"""
# if _idf_diag is not set, this will raise an attribute error,
# which means hasattr(self, "idf_") is False
return np.ravel(self._idf_diag.sum(axis=0))
@idf_.setter
def idf_(self, value):
value = np.asarray(value, dtype=np.float64)
n_features = value.shape[0]
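        # Store the idf vector as a sparse diagonal matrix so that
        # ``transform`` can apply the reweighting as one sparse product.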
self._idf_diag = sp.spdiags(
value, diags=0, m=n_features, n=n_features, format="csr"
)
def _more_tags(self):
return {"X_types": ["2darray", "sparse"]}
class TfidfVectorizer(CountVectorizer):
r"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to :class:`CountVectorizer` followed by
:class:`TfidfTransformer`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'}, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
stop_words : {'english'}, list, default=None
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra-corpus document frequency of terms.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer`` is not callable.
max_df : float or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float in range [0.0, 1.0], the parameter represents a proportion of
documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float in range of [0.0, 1.0], the parameter represents a proportion
of documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : bool, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
        is binary. (Set ``use_idf`` to False and ``norm`` to None to get 0/1 outputs.)
    dtype : dtype, default=np.float64
Type of the matrix returned by fit_transform() or transform().
norm : {'l1', 'l2'}, default='l2'
Each output row will have unit norm, either:
- 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
- 'l1': Sum of absolute values of vector elements is 1.
See :func:`preprocessing.normalize`.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
idf_ : array of shape (n_features,)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See Also
--------
CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
TfidfTransformer : Performs the TF-IDF transformation from a provided
matrix of counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = TfidfVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> vectorizer.get_feature_names_out()
array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
'this'], ...)
>>> print(X.shape)
(4, 9)
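    >>> # Hedged sanity check: with the default ``norm='l2'`` every row of X
    >>> # has unit Euclidean norm.
    >>> import numpy as np
    >>> round(float(np.sum(X[0].toarray() ** 2)), 8)
    1.0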
"""
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
analyzer="word",
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
max_df=1.0,
min_df=1,
max_features=None,
vocabulary=None,
binary=False,
dtype=np.float64,
norm="l2",
use_idf=True,
smooth_idf=True,
sublinear_tf=False,
):
super().__init__(
input=input,
encoding=encoding,
decode_error=decode_error,
strip_accents=strip_accents,
lowercase=lowercase,
preprocessor=preprocessor,
tokenizer=tokenizer,
analyzer=analyzer,
stop_words=stop_words,
token_pattern=token_pattern,
ngram_range=ngram_range,
max_df=max_df,
min_df=min_df,
max_features=max_features,
vocabulary=vocabulary,
binary=binary,
dtype=dtype,
)
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def idf_(self):
"""Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
"""
if not hasattr(self, "_tfidf"):
raise NotFittedError(
f"{self.__class__.__name__} is not fitted yet. Call 'fit' with "
"appropriate arguments before using this attribute."
)
return self._tfidf.idf_
@idf_.setter
def idf_(self, value):
if not self.use_idf:
raise ValueError("`idf_` cannot be set when `user_idf=False`.")
if not hasattr(self, "_tfidf"):
            # We should support transferring `idf_` from another
            # `TfidfTransformer`, so create the transformer instance if it
            # does not exist yet.
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
self._validate_vocabulary()
if hasattr(self, "vocabulary_"):
if len(self.vocabulary_) != len(value):
raise ValueError(
"idf length = %d must be equal to vocabulary size = %d"
                    % (len(value), len(self.vocabulary_))
)
self._tfidf.idf_ = value
def _check_params(self):
if self.dtype not in FLOAT_DTYPES:
warnings.warn(
"Only {} 'dtype' should be used. {} 'dtype' will "
"be converted to np.float64.".format(FLOAT_DTYPES, self.dtype),
UserWarning,
)
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
"""
self._check_params()
self._warn_for_unused_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
self._check_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, msg="The TF-IDF vectorizer is not fitted")
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
def _more_tags(self):
return {"X_types": ["string"], "_skip_test": True}
| sergeyf/scikit-learn | sklearn/feature_extraction/text.py | Python | bsd-3-clause | 74,905 |
"""
WSGI config for recipefinder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "recipefinder.settings")
application = get_wsgi_application()
| miguelp1986/recipe-finder | recipefinder/recipefinder/wsgi.py | Python | mit | 401 |
#print "Hello World!"
#print "Hello Again"
#print "I like typing this, really?"
#print "This is 20% cooler"
print 'Single quote madness?'
#print "'Hello' Clarice"
#print 'There once was a man from "St Ives"'
| vanonselenp/Learning | Python/LPTHW/ex1.py | Python | mit | 208 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import traceback,sys
class WafError(Exception):
def __init__(self,msg='',ex=None):
self.msg=msg
assert not isinstance(msg,Exception)
self.stack=[]
if ex:
if not msg:
self.msg=str(ex)
if isinstance(ex,WafError):
self.stack=ex.stack
else:
self.stack=traceback.extract_tb(sys.exc_info()[2])
self.stack+=traceback.extract_stack()[:-1]
self.verbose_msg=''.join(traceback.format_list(self.stack))
def __str__(self):
return str(self.msg)
class BuildError(WafError):
def __init__(self,error_tasks=[]):
self.tasks=error_tasks
WafError.__init__(self,self.format_error())
def format_error(self):
lst=['Build failed']
for tsk in self.tasks:
txt=tsk.format_error()
if txt:lst.append(txt)
return'\n'.join(lst)
class ConfigurationError(WafError):
pass
class TaskRescan(WafError):
pass
class TaskNotReady(WafError):
pass
| Gnurou/glmark2 | waflib/Errors.py | Python | gpl-3.0 | 984 |
# coding: utf-8
"""
Module where grappelli dashboard modules classes are defined.
"""
# DJANGO IMPORTS
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.apps import apps as django_apps
# GRAPPELLI IMPORTS
from grappelli.dashboard.utils import AppListElementMixin
class DashboardModule(object):
"""
Base class for all dashboard modules.
Dashboard modules have the following properties:
``collapsible``
Boolean that determines whether the module is collapsible, this
allows users to show/hide module content. Default: ``True``.
``column``
Integer that corresponds to the column.
Default: None.
``title``
String that contains the module title, make sure you use the django
gettext functions if your application is multilingual.
Default value: ''.
``title_url``
String that contains the module title URL. If given the module
title will be a link to this URL. Default value: ``None``.
``css_classes``
A list of css classes to be added to the module ``div`` class
attribute. Default value: ``None``.
``pre_content``
Text or HTML content to display above the module content.
Default value: ``None``.
``post_content``
Text or HTML content to display under the module content.
Default value: ``None``.
``template``
The template to use to render the module.
Default value: 'grappelli/dashboard/module.html'.
"""
template = 'grappelli/dashboard/module.html'
collapsible = True
column = None
show_title = True
title = ''
title_url = None
css_classes = None
pre_content = None
post_content = None
children = None
def __init__(self, title=None, **kwargs):
if title is not None:
self.title = title
for key in kwargs:
if hasattr(self.__class__, key):
setattr(self, key, kwargs[key])
self.children = self.children or []
self.css_classes = self.css_classes or []
# boolean flag to ensure that the module is initialized only once
self._initialized = False
def init_with_context(self, context):
"""
Like for the :class:`~grappelli.dashboard.Dashboard` class, dashboard
modules have a ``init_with_context`` method that is called with a
``django.template.RequestContext`` instance as unique argument.
This gives you enough flexibility to build complex modules, for
example, let's build a "history" dashboard module, that will list the
last ten visited pages::
from grappelli.dashboard import modules
class HistoryDashboardModule(modules.LinkList):
title = 'History'
def init_with_context(self, context):
request = context['request']
# we use sessions to store the visited pages stack
history = request.session.get('history', [])
for item in history:
self.children.append(item)
# add the current page to the history
history.insert(0, {
'title': context['title'],
'url': request.META['PATH_INFO']
})
if len(history) > 10:
history = history[:10]
request.session['history'] = history
"""
pass
def is_empty(self):
"""
Return True if the module has no content and False otherwise.
"""
return self.pre_content is None and self.post_content is None and len(self.children) == 0
def render_css_classes(self):
"""
Return a string containing the css classes for the module.
"""
ret = ['grp-dashboard-module']
if self.collapsible:
ret.append('grp-collapse')
if "grp-open" not in self.css_classes and "grp-closed" not in self.css_classes:
ret.append('grp-open')
ret += self.css_classes
return ' '.join(ret)
class Group(DashboardModule):
"""
Represents a group of modules.
Here's an example of modules group::
from grappelli.dashboard import modules, Dashboard
class MyDashboard(Dashboard):
def __init__(self, **kwargs):
Dashboard.__init__(self, **kwargs)
self.children.append(modules.Group(
title="My group",
children=[
modules.AppList(
title='Administration',
models=('django.contrib.*',)
),
modules.AppList(
title='Applications',
exclude=('django.contrib.*',)
)
]
))
"""
template = 'grappelli/dashboard/modules/group.html'
def init_with_context(self, context):
if self._initialized:
return
for module in self.children:
module.init_with_context(context)
self._initialized = True
def is_empty(self):
"""
A group of modules is considered empty if it has no children or if
all its children are empty.
"""
if super(Group, self).is_empty():
return True
for child in self.children:
if not child.is_empty():
return False
return True
class LinkList(DashboardModule):
"""
A module that displays a list of links.
"""
title = _('Links')
template = 'grappelli/dashboard/modules/link_list.html'
def init_with_context(self, context):
if self._initialized:
return
new_children = []
for link in self.children:
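            # Normalize tuple/list children of the form
            # (title, url[, external[, description]]) into dicts.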
if isinstance(link, (tuple, list,)):
link_dict = {'title': link[0], 'url': link[1]}
if len(link) >= 3:
link_dict['external'] = link[2]
if len(link) >= 4:
link_dict['description'] = link[3]
new_children.append(link_dict)
else:
new_children.append(link)
self.children = new_children
self._initialized = True
class AppList(DashboardModule, AppListElementMixin):
"""
Module that lists installed apps and their models.
"""
title = _('Applications')
template = 'grappelli/dashboard/modules/app_list.html'
models = None
exclude = None
def __init__(self, title=None, **kwargs):
self.models = list(kwargs.pop('models', []))
self.exclude = list(kwargs.pop('exclude', []))
super(AppList, self).__init__(title, **kwargs)
def init_with_context(self, context):
if self._initialized:
return
items = self._visible_models(context['request'])
apps = {}
for model, perms in items:
app_label = model._meta.app_label
if app_label not in apps:
apps[app_label] = {
'name': django_apps.get_app_config(app_label).verbose_name,
'title': capfirst(app_label.title()),
'url': self._get_admin_app_list_url(model, context),
'models': []
}
model_dict = {}
model_dict['title'] = capfirst(model._meta.verbose_name_plural)
if perms['change']:
model_dict['admin_url'] = self._get_admin_change_url(model, context)
if perms['add']:
model_dict['add_url'] = self._get_admin_add_url(model, context)
apps[app_label]['models'].append(model_dict)
apps_sorted = list(apps.keys())
apps_sorted.sort()
for app in apps_sorted:
# sort model list alphabetically
apps[app]['models'].sort(key=lambda i: i['title'])
self.children.append(apps[app])
self._initialized = True
class ModelList(DashboardModule, AppListElementMixin):
"""
Module that lists a set of models.
"""
template = 'grappelli/dashboard/modules/model_list.html'
models = None
exclude = None
def __init__(self, title=None, models=None, exclude=None, **kwargs):
self.models = list(models or [])
self.exclude = list(exclude or [])
super(ModelList, self).__init__(title, **kwargs)
def init_with_context(self, context):
if self._initialized:
return
items = self._visible_models(context['request'])
if not items:
return
for model, perms in items:
model_dict = {}
model_dict['title'] = capfirst(model._meta.verbose_name_plural)
if perms['change']:
model_dict['admin_url'] = self._get_admin_change_url(model, context)
if perms['add']:
model_dict['add_url'] = self._get_admin_add_url(model, context)
self.children.append(model_dict)
self._initialized = True
class RecentActions(DashboardModule):
"""
Module that lists the recent actions for the current user.
"""
title = _('Recent Actions')
template = 'grappelli/dashboard/modules/recent_actions.html'
limit = 10
include_list = None
exclude_list = None
def __init__(self, title=None, limit=10, include_list=None,
exclude_list=None, **kwargs):
self.include_list = include_list or []
self.exclude_list = exclude_list or []
kwargs.update({'limit': limit})
super(RecentActions, self).__init__(title, **kwargs)
def init_with_context(self, context):
if self._initialized:
return
from django.db.models import Q
from django.contrib.admin.models import LogEntry
request = context['request']
        def get_qset(content_types):
from django.contrib.contenttypes.models import ContentType
qset = None
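            # OR together one Q object per entry; entries may be ContentType
            # instances or "app_label.model" strings.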
            for contenttype in content_types:
if isinstance(contenttype, ContentType):
current_qset = Q(content_type__id=contenttype.id)
else:
try:
app_label, model = contenttype.split('.')
                    except ValueError:
raise ValueError('Invalid contenttype: "%s"' % contenttype)
current_qset = Q(
content_type__app_label=app_label,
content_type__model=model
)
if qset is None:
qset = current_qset
else:
qset = qset | current_qset
return qset
if request.user is None:
qs = LogEntry.objects.all()
else:
qs = LogEntry.objects.filter(user__pk__exact=request.user.pk)
if self.include_list:
qs = qs.filter(get_qset(self.include_list))
if self.exclude_list:
qs = qs.exclude(get_qset(self.exclude_list))
self.children = qs.select_related('content_type', 'user')[:self.limit]
self._initialized = True
class Feed(DashboardModule):
"""
Class that represents a feed dashboard module.
"""
title = _('RSS Feed')
template = 'grappelli/dashboard/modules/feed.html'
feed_url = None
limit = None
def __init__(self, title=None, feed_url=None, limit=None, **kwargs):
kwargs.update({'feed_url': feed_url, 'limit': limit})
super(Feed, self).__init__(title, **kwargs)
def init_with_context(self, context):
if self._initialized:
return
import datetime
if self.feed_url is None:
raise ValueError('You must provide a valid feed URL')
try:
import feedparser
except ImportError:
self.children.append({
'title': ('You must install the FeedParser python module'),
'warning': True,
})
return
feed = feedparser.parse(self.feed_url)
if self.limit is not None:
entries = feed['entries'][:self.limit]
else:
entries = feed['entries']
for entry in entries:
entry.url = entry.link
try:
entry.date = datetime.date(*entry.updated_parsed[0:3])
            except Exception:
# no date for certain feeds
pass
self.children.append(entry)
self._initialized = True
| sivaprakashniet/push_pull | p2p/lib/python2.7/site-packages/grappelli/dashboard/modules.py | Python | bsd-3-clause | 12,712 |
"""
Fetch Build IDs from ELF core
In --list mode, print two names for each file,
one from the file note, and the other from the link map.
The first file (the executable) is not in the link map.
The names can differ because of symbolic links.
"""
from argparse import ArgumentParser
from . import memmap
from .elf import Core, Elf
def main():
""" Fetch Build IDs in ELF core """
parser = ArgumentParser()
parser.add_argument("--list", action='store_true')
parser.add_argument("--prefix", type=str, default='')
parser.add_argument("file")
args = parser.parse_args()
core = Core(memmap(args.file), args.file)
    linkmap = {lm.addr: lm.name for lm in core.linkmap}
for addr, elf in core.elves():
name, build_id = elf.name, elf.build_id()
if args.list:
print("{:016x} {} {} ({})".format(addr, build_id, name, linkmap.get(addr)))
else:
try:
elf_id = Elf(memmap(args.prefix + name), name).build_id()
assert elf_id == build_id, "{}: {} != {}".format(name, elf_id, build_id)
except (AssertionError, FileNotFoundError) as exc:
print(build_id, exc)
| wackrat/structer | structer/build_ids.py | Python | mit | 1,198 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from .validators import is_item_iterable
def coerce_sequence_of_tuple(sequence):
"""Make sure all items of a sequence are of type tuple.
Parameters
----------
sequence : sequence
A sequence of items.
Returns
-------
list[tuple]
A list containing the items of the original sequence,
with each iterable item converted to a tuple,
and non-iterable items wrapped in a tuple.
Examples
--------
>>> items = coerce_sequence_of_tuple(['a', 1, (None, ), [2.0, 3.0]])
>>> is_sequence_of_tuple(items)
True
"""
items = []
for item in sequence:
if not isinstance(item, tuple):
if not is_item_iterable(item):
item = (item, )
else:
item = tuple(item)
items.append(item)
return items
def coerce_sequence_of_list(sequence):
"""Make sure all items of a sequence are of type list.
Parameters
----------
sequence : sequence
A sequence of items.
Returns
-------
list[list]
A list containing the items of the original sequence,
with each iterable item converted to a list,
and non-iterable items wrapped in a list.
Examples
--------
>>> items = coerce_sequence_of_list(['a', 1, (None, ), [2.0, 3.0]])
>>> is_sequence_of_list(items)
True
"""
items = []
for item in sequence:
if not isinstance(item, list):
if not is_item_iterable(item):
item = [item]
else:
item = list(item)
items.append(item)
return items
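# A minimal runnable sketch of both helpers, mirroring the doctests above;
# expected output is shown as hedged comments.
if __name__ == '__main__':
    mixed = ['a', 1, (None, ), [2.0, 3.0]]
    print(coerce_sequence_of_tuple(mixed))  # e.g. [('a',), (1,), (None,), (2.0, 3.0)]
    print(coerce_sequence_of_list(mixed))   # e.g. [['a'], [1], [None], [2.0, 3.0]]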
| compas-dev/compas | src/compas/data/coercion.py | Python | mit | 1,742 |
#!/usr/bin/env python
#
############################################################################
#
# MODULE: m.swim.subbasins v1.6
# AUTHOR(S): Michel Wortmann, wortmann@pik-potsdam.de
# PURPOSE: Preprocessing suit for the Soil and Water Integrated Model (SWIM)
# COPYRIGHT: (C) 2012-2022 by Wortmann/PIK
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
#%Module
#% description: Soil and Water Integrated Model (SWIM) subbasin preprocessor
#% keywords: hydrological modelling, SWIM, subbasins
#%End
#%Option
#% guisection: Input
#% key: elevation
#% type: string
#% required: yes
#% multiple: no
#% key_desc: name
#% description: Elevation raster (hole-filled)
#% gisprompt: old,cell,raster
#%end
#%Option
#% guisection: Input
#% key: stations
#% type: string
#% required: yes
#% multiple: no
#% key_desc: name
#% label: Station point vector
#% description: Will be snapped to the nearest stream
#% gisprompt: old,vector,vector
#%end
#%Option
#% guisection: Input
#% key: streamthresh
#% type: double
#% required: yes
#% multiple: no
#% key_desc: km2
#% label: Drainage area of smallest stream in km2 (influences station snapping)
#% description: Stations will be snapped to these streams, i.e. it should not be smaller than the smallest catchment.
#%end
#%Option
#% guisection: Subbasin design
#% key: upthresh
#% type: double
#% required: no
#% multiple: no
#% label: Upper subbasin threshold (in sq km, mostly underestimated)
#% description: ignored if upthreshcolumn is given, mostly underestimated
#% answer: 100
#%end
#%Option
#% guisection: Subbasin design
#% key: lothresh
#% type: double
#% required: no
#% multiple: no
#% key_desc: km2
#% description: Lower threshold of subbasin size in sq km (default 5% of upper)
#%end
#%Option
#% guisection: Subbasin design
#% key: upthreshcolumn
#% type: string
#% required: no
#% multiple: no
#% key_desc: name
#% description: Column with upper subbasin threshold in stations vector
#%end
#%Flag
#% guisection: Topography
#% key: d
#% label: don't process DEM (accumulation, drainage, streams must exist)
#%end
#%Option
#% guisection: Topography
#% key: depression
#% type: string
#% required: no
#% multiple: no
#% key_desc: name
#% description: Raster map of known real depressions in or around catchment (only if not -d)
#% gisprompt: old,cell,raster
#%end
#%Option
#% guisection: Topography
#% key: accumulation
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Name of accumulation map to be created (or existing if -d)
#% answer: accumulation
#% gisprompt: new,cell,raster
#%end
#%Option
#% guisection: Topography
#% key: drainage
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Name of drainage map to be created (or existing if -d)
#% answer: drainage
#% gisprompt: new,cell,raster
#%end
#%Option
#% guisection: Topography
#% key: streams
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Name of streams vector to be created (or existing if -d)
#% answer: streams
#% gisprompt: new,vector,vector
#%end
#%Option
#% guisection: Output
#% key: subbasins
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Name of resulting subbasin vector and raster map
#% answer: subbasins
#% gisprompt: new,cell,raster
#%end
#%Option
#% guisection: Output
#% key: catchments
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Name of resulting vector and raster of all stations catchments
#% answer: catchments
#% gisprompt: new,cell,raster
#%end
#%Option
#% guisection: Output
#% key: catchmentprefix
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Prefix of individual catchment vector for each station
#% answer: catchment_
#% gisprompt: new,cell,raster
#%end
#%Option
#% guisection: Output
#% key: stations_snapped
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Stations as snapped to streams plus some additional info in the table
#% answer: stations_snapped
#% gisprompt: new,cell,vector
#%end
#%Option
#% guisection: Output
#% key: slopesteepness
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: USLE slopesteepness from r.watershed, useful for m.swim.substats
#% answer: slopesteepness
#% gisprompt: new,cell,raster
#%end
#%Option
#% guisection: Output
#% key: slopelength
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: USLE slope length from r.watershed, useful for m.swim.substats
#% answer: slopelength
#% gisprompt: new,cell,raster
#%end
#%Option
#% guisection: Optional
#% key: streamcarve
#% type: string
#% required: no
#% multiple: no
#% key_desc: vector
#% description: Existing river network to be carved into the elevation raster
#% gisprompt: old,vector,vector
#%end
#%Option
#% guisection: Optional
#% key: predefined
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Raster of predefined units to include in the subbasin map
#% gisprompt: old,cell,raster
#%end
#%Option
#% guisection: Optional
#% key: rwatershedflags
#% type: string
#% required: no
#% multiple: no
#% key_desc: string
#% description: Flags parsed to r.watershed, check r.watershed --help
#% answer: s
#%end
#%Option
#% guisection: Optional
#% key: rwatershedmemory
#% type: integer
#% required: no
#% multiple: no
#% key_desc: integer MB
#% description: Limits memory usage of r.watershed in MB (slower)
#%end
#%Flag
#% guisection: Optional
#% key: g
#% label: Create regular grid subbasins interpreting upthresh as the grid size.
#%end
#%Flag
#% guisection: Optional
#% key: l
#% label: Create regular lonlat grid subbasins interpreting upthresh as the grid size.
#%end
#%Flag
#% guisection: Optional
#% key: s
#% label: Just print statistics of subbasins, --o must be set
#%end
#%Flag
#% guisection: Optional
#% key: k
#% label: Keep intermediate files (include __ in names)
#%end
#%Flag
#% guisection: Optional
#% key: v
#% label: Show version and change/install date of this module and grass.
#%end
import sys
import numpy as np
import datetime as dt
from collections import OrderedDict
import grass.script as grass
grun = grass.run_command
gread = grass.read_command
gm = grass.message
gwarn = grass.warning
gdebug = grass.debug
gprogress = grass.core.percent
# cautious Alpha implementation of the mswim abstraction package
try:
path = grass.utils.get_lib_path(modname='m.swim', libname='mswim')
if path:
sys.path.extend(path.split(':'))
import mswim
else:
grass.warning('Unable to find the mswim python library.')
except Exception as e:
grass.warning('An error occurred while loading the mswim python library.\n'+str(e))
mswim = None
def interpret_options(optionsandflags):
options = {}
for o in optionsandflags:
if optionsandflags[o] != '':
try:
options[o] = int(optionsandflags[o]) # int
except ValueError:
try:
options[o] = float(optionsandflags[o]) # float
except ValueError:
options[o] = optionsandflags[o] # str
return options
class main:
def __init__(self, **optionsandflags):
'''Process all arguments and prepare processing'''
# add all options and flags as attributes (only nonempty ones)
self.options = interpret_options(optionsandflags)
self.__dict__.update(self.options)
# save region for convenience
self.region = grass.region()
self.region['kmtocell'] = 10**6 / (self.region['ewres'] * self.region['nsres'])
self.region['celltokm'] = self.region['ewres'] * self.region['nsres'] * 1e-6
# check if DEM to processed or if all inputs set
if not self.is_set('accumulation', 'drainage', 'streams'):
            grass.fatal('At least one of these is not set: accumulation, drainage, streams.')
# lothresh default
if 'lothresh' not in self.options:
self.lothresh = self.upthresh * 0.05
# what to do with upthresh
if self.is_set('upthreshcolumn'):
gm('Will look for upper thresholds in the %s column.' %
self.upthreshcolumn)
# get thresholds from column in station vect
try:
threshs = grass.vector_db_select(
self.stations, columns=self.upthreshcolumn)['values']
self.upthresh = OrderedDict([(k, float(v[0]))
for k, v in sorted(threshs.items())])
            except Exception:
                grass.fatal("Can't read the upper threshold from the column %s"
                            % self.upthreshcolumn)
# streamthresh
if 'streamthresh' in self.options:
# convert to cells
self.streamthresh = self.region['kmtocell'] * self.streamthresh
# check if reasonable
fract = float(self.streamthresh) / self.region['cells']
if fract > 0.5 or fract < 0.01:
gwarn('streamthresh is %s percent of the region size!' % (fract*100))
else:
self.streamthresh = int(self.region['cells'] * 0.02)
# if no r.watershed flags given
if 'rwatershedflags' not in self.options:
self.rwatershedflags = 's'
elif 's' not in self.rwatershedflags:
gwarn('rwatershedflags without "s" (single flow direction) will render the '
'accumulation raster incompatible with m.swim.routing!')
if 'rwatershedmemory' in self.options:
self.rwatershedflags += 'm'
else:
# default value/not used
self.rwatershedmemory = 300
# check input for stats print
if self.s:
for o in ['streams', 'stations', 'catchmentprefix']:
if not self.is_set(o):
                    grass.fatal('%s needs to be set!' % o)
# get all catchments
rst = grass.list_strings('rast', self.catchmentprefix+'*')
rolist = [(int(r.split('@')[0].replace(self.catchmentprefix, '')), r)
for r in sorted(rst) if '__' not in r]
self.catchment_rasters = OrderedDict(rolist)
gm('Found these catchments %s' % self.catchment_rasters)
# calculate station topology
self.snap_stations()
self.get_stations_topology()
# initialise subbasinsdone
self.subbasinsdone = {}
return
def execute(self):
# execute
if self.s:
self.print_statistics()
sys.exit()
# carve streams
if 'streamcarve' in self.options:
gm('Carving streams...')
self.carve_streams()
# process DEM if need be
if not self.d:
self.process_DEM()
# always process
self.snap_stations()
self.make_catchments()
self.make_subbasins()
self.postprocess_catchments()
self.postprocess_subbasins()
self.write_stations_snapped()
self.print_statistics()
# clean
if not self.k:
grun('g.remove', type='raster,vector', pattern='*__*',
flags='fb', quiet=True)
return
def is_set(self, *options):
return all([hasattr(self, i) for i in options])
def process_DEM(self):
"""Make drainage, accumulation and stream raster"""
gm('Processing DEM to derive accumulation, drainage and streams...')
# decide on input arguments #######
# from km2 to cells
if type(self.upthresh) in [int, float]:
uthresh = self.upthresh
        else:  # take the most common value in the upthresh column
            uthresh = max(set(self.upthresh.values()),
                          key=list(self.upthresh.values()).count)
thresh = self.region['kmtocell'] * uthresh
kwargs = {'elevation': self.elevation,
'threshold': thresh,
# Output
'accumulation': 'accum__float',
'drainage': self.drainage,
'basin': 'standard__subbasins',
'slope_steepness': self.slopesteepness,
'length_slope': self.slopelength,
'flags': self.rwatershedflags,
'memory': self.rwatershedmemory}
# check if depressions
if self.is_set('depression'):
kwargs['depression'] = self.depression
# carve streams
if self.is_set('streamcarve'):
kwargs['elevation'] = self.carvedelevation
grun('r.watershed', **kwargs)
# save subbasins in dictionary
if not (self.g or self.l):
self.subbasinsdone[thresh] = 'standard__subbasins'
# postprocess accumulation map
grass.mapcalc("%s=int(if(accum__float <= 0,null(),accum__float))" %
self.accumulation)
# make river network to vector
gm('Making vector river network...')
# stream condition
scon = '{0} >= {1}'.format(self.accumulation, self.streamthresh)
# extract out of accumulation and make vector
grass.mapcalc(self.streams+"__thick = if(%s, %s, null())" %
(scon, self.accumulation))
grun('r.thin', input=self.streams+'__thick', output=self.streams,
quiet=True)
grun('r.to.vect', flags='s', input=self.streams, output=self.streams,
type='line', quiet=True)
return
def carve_streams(self):
'''Carve vector streams into the DEM, i.e. setting those cells =0'''
gm('Carving %s into the elevation %s...' % (self.streamcarve, self.elevation))
# stream vector to raster cells
self.streamrastcarved = self.streamcarve.split('@')[0]+'__'
grun('v.to.rast', input=self.streamcarve, output=self.streamrastcarved,
type='line', use='val', val=1, quiet=True)
# carve
self.carvedelevation = '%s__carved' % self.elevation.split('@')[0]
grass.mapcalc("%s=if(isnull(%s),%s,0)" % (self.carvedelevation,
self.streamrastcarved, self.elevation))
return
def snap_stations(self):
'''Correct stations by snapping them to the streams vector.
Snapped stations are written out to stations_snapped if given.
'''
warning_threshold = 1000 # m
gm('Snapping stations to streams...')
# types
dtnames = ('stationID', 'distance', 'x', 'y')
dtpy = (int, float, float, float)
# get distances to rivernetwork and nearest x and y
snapped_points = gread('v.distance', flags='p', quiet=True,
from_=self.stations, to=self.streams,
from_type='point', to_type='line',
upload='dist,to_x,to_y').split()
# format, report and reassign stations_snapped_coor
snapped_coor = np.array([tuple(d.split('|'))
for d in snapped_points[1:]],
dtype=list(zip(dtnames, dtpy)))
# warn if above threshold
snapped_over_thresh = snapped_coor[snapped_coor['distance'] >
warning_threshold]
if len(snapped_over_thresh) > 0:
gwarn('These stations were moved further than %sm '
'(stationID: distance):' % warning_threshold)
for i, d in snapped_over_thresh[['stationID', 'distance']]:
gwarn('%i %1.0f' % (i, d))
# warn if snapped to within cell size
near = {}
for i, d, x, y in snapped_coor:
distcells = np.sqrt((snapped_coor['x'] - x)**2 +
(snapped_coor['y'] - y)**2) * self.region['kmtocell']
isnear = np.all((distcells < 2, snapped_coor['stationID'] != i), axis=0)
if isnear.sum() > 0:
near[i] = snapped_coor['stationID'][isnear]
if len(near) > 0:
gwarn('These stations have been snapped to within 2 cells of '
'other stations. This will lead to an incorrect station '
'topology.')
for i, other in near.items():
gwarn('%s > %s' % (i, ', '.join(other.astype(str))))
# save results
lo = [(i, snapped_coor[i]) for i in dtnames]
self.stations_snapped_columns = OrderedDict(lo)
lo = [(i, (x, y)) for i, x, y in snapped_coor[['stationID', 'x', 'y']]]
self.stations_snapped_coor = OrderedDict(lo)
# add accummulation as darea
darea = (rwhat([self.accumulation], self.stations_snapped_coor.values())
.flatten() * self.region['celltokm'])
self.stations_snapped_columns['darea'] = darea
return
def make_catchments(self):
'''Make catchment raster and if catchmentprefix is set also vectors
for all stations'''
self.catchment_rasters = OrderedDict()
nfmt = '%' + '0%ii' % len(str(max(self.stations_snapped_coor.keys())))
gm('Creating catchments...')
# create watersheds for stations
for i, (si, (x, y)) in enumerate(self.stations_snapped_coor.items()):
if self.is_set('catchmentprefix'):
name = self.catchmentprefix + nfmt % si
else:
name = 'watersheds__st%s' % si
gdebug(('station %s' % si))
grun('r.water.outlet', input=self.drainage, output=name+'__all1',
coordinates='%s,%s' % (x, y))
# give watershed number and put 0 to null()
grass.mapcalc(name+' = if('+name+'__all1'+' == 1,%s,null())' % si,
quiet=True)
# make vector of catchment as well
if 'catchmentprefix' in self.options:
grun('r.to.vect', quiet=True, flags='vs',
input=name, output=name, type='area')
# update maps dictionary
self.catchment_rasters[si] = name
# report progress
gprogress(i+1, len(self.stations_snapped_coor), 1)
# check stations topology before patching basins
self.get_stations_topology()
# get stationIDs sorted by number of upstream stations (ascending)
ts = sorted(self.stations_order, key=self.stations_order.get)
catchments_ordered = [self.catchment_rasters[i] for i in ts]
# patch catchments in order
gm('Patching catchments...')
patch_basins(catchments_ordered, outname=self.catchments)
return
def get_stations_topology(self):
        '''Return a dictionary with stations as keys and lists of watershed ids
        as values'''
gm('Check station topology...')
# get station topology
stopo = rwhat(self.catchment_rasters.values(),
self.stations_snapped_coor.values())
        # list of numpy arrays with indices of nonzero cat values
stationid_array = np.array(list(self.stations_snapped_coor.keys()))
stopo = stopo.transpose()
topo = []
for i, d in enumerate(stopo):
d[i] = 0
s = np.nonzero(d)[0]
topo += [stationid_array[s]]
self.stations_upstream = OrderedDict(zip(self.stations_snapped_coor.keys(), topo))
# save downstream stationID
# get stationIDs sorted by number of upstream stations (ascending)
tslen = OrderedDict([(k, len(v)) for k, v in self.stations_upstream.items()])
ts = sorted(tslen, key=tslen.get)
dsid = OrderedDict()
# find first occurence of id in length sorted downstream ids
for i in self.stations_upstream.keys():
for ii in ts:
if i in self.stations_upstream[ii] and i not in dsid:
dsid[i] = ii
break
if i not in dsid:
dsid[i] = -1
self.stations_snapped_columns['ds_stationID'] = np.array(
list(dsid.values()), dtype=int)
# create topology order
order = {} # unsorted dictionary
for sid in tslen.keys():
# start from all headwaters and increase orders downstream
if tslen[sid] == 0:
order[sid] = 1
ii, oi = sid, 1
while dsid[ii] > 0:
ii = dsid[ii]
oi += 1
order[ii] = max(oi, order[ii]) if ii in order else oi
                    if oi > len(tslen):
                        grass.fatal('Station %s seems to have a circular '
                                    'topology (%r)' % (sid, dsid[ii]))
        # order it again; default to 1 if no order was found
orderlist = [(k, order.pop(k, 1)) for k in tslen.keys()]
self.stations_order = OrderedDict(orderlist)
oarr = np.array(list(self.stations_order.values()), dtype=int)
self.stations_snapped_columns['strahler_order'] = oarr
return
def make_subbasins(self):
"""Create subbasins with corrected stations shape file
with maximum threshold in square km from the maps processed in process_DEM
"""
gm('Creating subbasins...')
self.subbasins_rasters = OrderedDict()
        # use upthresh for all if self.upthresh was not already converted to a dict in __init__
if type(self.upthresh) in [int, float]:
self.upthresh = OrderedDict([(i, self.upthresh)
for i in self.stations_upstream])
for i, sid in enumerate(self.stations_upstream.keys()):
# prepare inputs for the subbasins
subbasins_name = 'subbasins__%s' % sid
# calculate threshold from sq km to cells
thresh = self.upthresh[sid] * self.region['kmtocell']
gdebug('Subbasin threshold: %s km2, %s cells' %
(self.upthresh[sid], thresh))
# check if already calculated with that threshold
if thresh in self.subbasinsdone:
subbasins_uncut = self.subbasinsdone[thresh]
gdebug('Using %s, already calculated.' % subbasins_uncut)
else:
subbasins_uncut = subbasins_name+'__uncut'
kwargs = {'elevation': self.elevation,
'basin' : subbasins_uncut,
'threshold': thresh,
'flags' : self.rwatershedflags,
'memory' : self.rwatershedmemory}
# carved elevation
if 'streamcarve' in self.options:
kwargs['elevation'] = self.carvedelevation
# r.watershed to produce subbasins
if self.g or self.l:
self.grid_subbasin(subbasins_uncut, self.upthresh[sid])
else:
grun('r.watershed', quiet=True, **kwargs)
# add to done subbasins list
self.subbasinsdone[thresh] = subbasins_uncut
# cut out subbasins for subarea
exp = ('%s=if(%s==%s, %s, null())' %
(subbasins_name, self.catchments, sid, subbasins_uncut))
grass.mapcalc(exp)
self.subbasins_rasters[sid] = subbasins_name
# report progress
gprogress(i+1, len(self.stations_upstream), 1)
# Make sure no subbasins have the same cat
lastmax = 0 # in case only 1 station is used
for i, (sid, srast) in enumerate(self.subbasins_rasters.items()):
# if more than one subarea add the last max to the current
if i > 0:
grass.mapcalc('{0}__lastmax={0} + {1}'.format(srast, lastmax),
quiet=True)
self.subbasins_rasters[sid] = srast + '__lastmax'
# get classes and check if subbasins were produced
classes = gread('r.stats', input=self.subbasins_rasters[sid],
quiet=True, flags='n').split()
if len(classes) == 0:
gwarn('%s has no subbasins and will be omitted'
' (station too close to others?)' % srast)
continue
            # r.stats returns category labels as strings; compare numerically
            lastmax = max(map(int, classes))
# report progress
gprogress(i+1, len(self.subbasins_rasters), 1)
if self.is_set('predefined'):
gm('Including predefined subbasins %s...' % self.predefined)
# avoid same numbers occur in subbasins
predef = self.predefined.split('@')[0]+'__aboverange'
grass.mapcalc('$output=if(isnull($c), null(), $p+$m)', m=lastmax,
output=predef, p=self.predefined, c=self.catchments)
# add to beginning of subbasins_rasters
self.subbasins_rasters = OrderedDict(
[('predefined', predef)] +
list(self.subbasins_rasters.items()))
# PATCHING subbasins maps
patch_basins(list(self.subbasins_rasters.values()), outname=self.subbasins)
# clean subbasin raster and vector keeping the same name
self.clean_subbasins()
return
def grid_subbasin(self, output, size):
"""Grid a raster with resolution size, optionally as lonlat."""
if self.l:
env = grass.gisenv()
grun('v.in.region', output='roi__', quiet=True)
# create temporary lonlat location
tmpdir, tmploc = grass.tempdir(), 'lonlat'
grass.core.create_location(tmpdir, tmploc, epsg=4326)
grun('g.mapset', mapset='PERMANENT', location=tmploc, dbase=tmpdir,
quiet=True)
# reproj roi, smax in meters = 200km per degree
grun('v.proj', input='roi__', mapset=env['MAPSET'], quiet=True,
location=env['LOCATION_NAME'], dbase=env['GISDBASE'])
grun('g.region', vector='roi__')
else:
grass.use_temp_region()
# create actual grid raster
grun('g.region', flags='a', res=size)
grass.mapcalc('$o = row()*col()', o=output)
if self.l:
# back to origional location and reproj
grun('g.mapset', mapset=env['MAPSET'], quiet=True,
location=env['LOCATION_NAME'], dbase=env['GISDBASE'])
grun('r.proj', input=output, mapset='PERMANENT',
location=tmploc, dbase=tmpdir, quiet=True)
else:
grass.del_temp_region()
return
def postprocess_catchments(self):
gm('Creating catchments vector map...')
# ## make vector from subbasins to match those
# grun('v.dissolve', quiet=True, input=self.subbasins,
# output=self.catchments, column='catchmentID')
# grun('v.what.rast', map=self.catchments, raster=self.catchments,
# column='catchmentID', type='centroid', quiet=True)
grun('r.to.vect', input=self.catchments, output=self.catchments,
type='area', flags='vst', quiet=True)
grun('v.db.addtable', map=self.catchments, quiet=True,
key='catchmentID', overwrite=True)
grun('v.to.db', map=self.catchments, option='area', units='kilometers',
columns='size', quiet=True)
grun('r.colors', quiet=True, map=self.catchments, color='random')
return
def postprocess_subbasins(self):
gm('Adding subbasin info to subbasin attribute table...')
# add subbasinIDs to stations_snapped and write out stations_snapped
ds_sbid = rwhat([self.subbasins],
self.stations_snapped_coor.values()).flatten()
self.stations_snapped_columns['subbasinID'] = ds_sbid
# assign catchment id
grun('v.what.rast', map=self.subbasins, raster=self.catchments,
column='catchmentID', quiet=True, type='centroid')
# mean,min,max and centroid elevation and subbasin size
cols = ['%s_elevation double' % s
for s in ['average', 'max', 'min', 'centroid']]
grun('v.db.addcolumn', map=self.subbasins, quiet=True,
column=','.join(cols))
grun('v.what.rast', map=self.subbasins, raster=self.elevation,
column='centroid_elevation', type='centroid', quiet=True)
for s in ['min', 'max', 'average']:
grun('r.stats.zonal', base=self.subbasins, cover=self.elevation,
method=s, output='%s__elevation' % s, quiet=True)
grun('v.what.rast', map=self.subbasins, raster='%s__elevation' % s,
column='%s_elevation' % s, type='centroid', quiet=True)
# size
grun('v.to.db', map=self.subbasins, option='area', units='kilometers',
columns='size', quiet=True)
# centroid x,y
grun('v.to.db', map=self.subbasins, option='coor', units='meters',
columns='centroid_x,centroid_y', quiet=True)
# random colormap
grun('r.colors', quiet=True, map=self.subbasins, color='random')
return
def clean_subbasins(self):
'''Make vector and remove areas smaller than lothresh and
make continous vector subbasin numbering with the outlet as 1
and update raster in the process.
Also assigns drainage areas to subbasin table
'''
gm('Cleaning subbasin map...')
tmp_subbasins = '%s__unclean' % self.subbasins
        # rename to avoid overwrite
grun('g.rename', raster=self.subbasins+','+tmp_subbasins, quiet=True)
# add little areas of watershed to subbasin map that arent covered
exp = "subbasins__0=if(~isnull('{1}') & isnull({0}),9999,'{0}')"
grass.mapcalc(exp.format(tmp_subbasins, self.catchments))
# convert subbasins to vector
grun('r.to.vect', quiet=True, flags='', input='subbasins__0',
output=self.subbasins + '__unclean', type='area')
        # remove subbasins smaller than lothresh (converted to m2) and prune vertices
prunedist = float(np.mean(list(self.upthresh.values())) * 3)
subbasins_cleaned = self.subbasins + '__cleaned'
grun('v.clean', quiet=True, input=self.subbasins+'__unclean', flags='bc',
output=subbasins_cleaned, type='area', tool='rmarea,prune',
thresh='%s,%s' % (self.lothresh*1000**2, prunedist))
grun('v.build', map=subbasins_cleaned, quiet=True)
gm('Assigning continuous categories to subbasins map...')
# TODO, not optimal: make raster and then vector again to have continuous cats
subbasins_continuous = self.subbasins + '__continuous'
grun('v.to.rast', input=subbasins_cleaned, output=subbasins_continuous,
type='area', use='cat', quiet=True)
grun('r.to.vect', quiet=True, flags='s', type='area',
input=subbasins_continuous, output=subbasins_continuous)
# delete default label column
grun('v.db.dropcolumn', map=subbasins_continuous, column='value,label',
quiet=True)
# add separate subbasinID column
grun('v.db.addcolumn', map=subbasins_continuous, columns='subbasinID int',
quiet=True)
grun('v.db.update', map=subbasins_continuous, column='subbasinID',
qcol='cat', quiet=True)
# get drainage area via accumulation map in sq km
grun('r.stats.zonal', base=subbasins_continuous, cover=self.accumulation,
method='max', output='max__accum__cells', quiet=True)
grass.mapcalc("max__accum=max__accum__cells*%s" % self.region['celltokm'],
quiet=True)
# upload to subbasin table
grun('v.db.addcolumn', map=subbasins_continuous, column='darea double',
quiet=True)
grun('v.what.rast', map=subbasins_continuous, raster='max__accum',
column='darea', type='centroid', quiet=True)
# change subbasin with the greatest drainage area (=outlet subbasin) to 1
tbl = get_table(subbasins_continuous, dtype=(int, float),
columns='subbasinID,darea')
# get max cat and old 1 cat
catmax = np.argmax(tbl['darea'])+1
# swap both values
grun('v.db.update', map=subbasins_continuous, column='subbasinID',
where='cat=%s' % catmax, value=1)
grun('v.db.update', map=subbasins_continuous, column='subbasinID',
where='cat=1', value=catmax)
# reclass to subbasinID via copy
grun('g.copy', vect=subbasins_continuous + ',unreclassed__subbasins',
quiet=True)
# TODO: here final self.subbasins vector is created
grun('v.reclass', input='unreclassed__subbasins', output=self.subbasins,
column='subbasinID', quiet=True)
grun('v.db.addtable', map=self.subbasins, key='subbasinID', quiet=True,
overwrite=True)
grun('v.db.join', map=self.subbasins, column='subbasinID',
otable='unreclassed__subbasins', ocolumn='subbasinID', quiet=True)
grun('v.db.dropcolumn', map=self.subbasins, column='cat', quiet=True)
# make raster again
# TODO: here final self.subbasins raster is created
grun('v.to.rast', input=self.subbasins, output=self.subbasins,
use='cat', quiet=True)
return
def write_stations_snapped(self):
"""Write out the stations_snapped to a vector."""
types = {'i': 'int', 'f': 'double'}
# columns
cols = self.stations_snapped_columns
cols_dt = [' '.join([i, types[cols[i].dtype.kind]]) for i in cols.keys()]
cols_fmt = '|'.join(['%'+cols[i].dtype.kind for i in cols.keys()])
data = np.column_stack(list(cols.values()))
# create vector if needed
p = grass.feed_command('v.in.ascii', input='-', x=3, y=4, cat=1, quiet=True,
columns=cols_dt, output=self.stations_snapped)
np.savetxt(p.stdin, data, delimiter='|', fmt=cols_fmt)
p.stdin.close()
p.wait()
# drop x y columns
grun('v.db.dropcolumn', map=self.stations_snapped, columns='x,y')
# add other columns
cat = grass.vector_info(self.stations_snapped)['attribute_primary_key']
grun('g.copy', vector=self.stations+',stations__tmp', quiet=True)
catother = grass.vector_info('stations__tmp')['attribute_primary_key']
grun('v.db.join', map=self.stations_snapped, column=cat,
other_table='stations__tmp', other_column=catother, quiet=True)
return
def print_statistics(self):
'''Output some statistics of the subbasin and subcatchment map'''
# subcatchments
scs = get_table(self.catchments, columns='catchmentID,size',
dtype=(int, float))
# subbasin sizes
sbs = get_table(self.subbasins, dtype=(int, int, float),
columns='subbasinID,catchmentID,size')
outletsb = rwhat([self.subbasins], self.stations_snapped_coor.values())
gm('-----------------------------------------------------------------')
print('''Catchment sizes :
ID excl. upstream incl. upstream outlet subbasin upstream stations''')
for i, a in enumerate(scs):
upix = [np.where(scs['catchmentID'] == c)[0][0]
for c in self.stations_upstream[a[0]] if c in scs['catchmentID']]
upstsize = np.sum(scs['size'][upix])+a[1]
upstst = list(map(str, self.stations_upstream[a[0]]))
upstststr = ', '.join(upstst) if len(upstst) <= 3 else '%s stations' % len(upstst)
print('%3i %14.2f %16.2f %16i %s' % (a[0], a[1], upstsize,
outletsb[i], upstststr))
# compile nice rows with total in the first column (first initialise dict, then add a column for each station)
sub = OrderedDict([('stationID', ['%9s' % 'total']),
('count', ['%8i' % len(sbs)]),
('min', ['%8.2f' % sbs['size'].min()]),
('mean', ['%8.2f' % sbs['size'].mean()]),
('max', ['%8.2f' % sbs['size'].max()])
])
cols = np.unique(sbs['catchmentID'])
for c in cols:
subs = sbs['size'][sbs['catchmentID'] == c]
if len(subs) == 0:
continue # in case sb outside catchments
sub['stationID'] += ['%9i' % c]
sub['count'] += ['%8i' % len(subs)]
sub['min'] += ['%8.2f' % np.min(subs)]
sub['mean'] += ['%8.2f' % np.mean(subs)]
sub['max'] += ['%8.2f' % np.max(subs)]
print('-----------------------------------------------------------------')
print('Subbasin statistics (km2):')
print(' '.join(['%-8s' %c for c in sub.keys()]))
for i in range(len(cols) + 1):
print(' '.join([sub[c][i] for c in sub]))
print('-----------------------------------------------------------------')
return scs, sbs
def rreclass(in_raster, in_list, out_list, proper=True):
"""Reclass a GRASS raster map from via an in list and outlist \n
Patches together the rules, writes it to file, performs r.reclass,
deletes in_raster and rules file and renames the outraster"""
# temporary rules file
temp_rules = grass.tempfile()
# put lists in easy writable numpy array
rules = np.array((in_list, out_list)).transpose()
# write rules to file
np.savetxt(temp_rules, rules, delimiter='=', fmt='%i')
# reclass raster in grass
grun('r.reclass', input=in_raster,
overwrite=True, quiet=True,
output=in_raster + '__',
rules=temp_rules)
# make reclassed raster a proper raster, remove in_rast and rename output
if proper:
grass.mapcalc('__temp=' + in_raster + '__', quiet=True)
grun('g.remove', type='rast', name=in_raster + '__,' + in_raster,
flags='f', quiet=True)
grun('g.rename', rast='__temp,' + in_raster, quiet=True)
return
def rwhat(rasters, coordinates):
'''Get point values of rasters [list] at the coordinates [list of tuple pairs]
'''
# string of coordinate pairs
coor_pairs = ['%s,%s' % tuple(cs) for cs in coordinates]
what = gread('r.what',
map=','.join(rasters),
null=0,
coordinates=','.join(coor_pairs),
separator=',').split('\n')[:-1]
# put category values into numpy array of integers
what_array = np.array(
[list(map(int, l.split(',')[-len(rasters):])) for l in what])
return what_array
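# Usage sketch for rwhat (map name and coordinates are illustrative):
#
#     cats = rwhat(['subbasins'], [(350000.0, 4500000.0)])
#     # -> 2D integer array with one row per coordinate pair and one
#     #    column per queried raster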
def patch_basins(rastlist, outname):
# patch all subbs together if more than one station
sb_len = len(rastlist)
if sb_len == 1:
grun('g.rename', quiet=True, rast=rastlist[0] + ',' + outname)
elif sb_len > 1:
grun('r.patch', input=','.join(rastlist), output=outname, quiet=True)
else:
grass.fatal('No maps to patch %r for %s' % (rastlist, outname))
return
def get_table(vector, dtype='U250', **kw):
'''Get a vector table into a numpy field array, dtype can either be one
for all or a list for each column'''
tbl = grass.vector_db_select(vector, **kw)
cols = tbl['columns']
values = [tuple(row) for row in tbl['values'].values()]
dtypes = {}
if type(dtype) not in [list, tuple]:
dtypes.update(dict(zip(cols, [dtype] * len(tbl['columns']))))
elif len(dtype) != len(cols):
        raise IOError("count of dtype doesn't match the columns!")
else:
dtypes.update(dict(zip(cols, dtype)))
# first check for empty entries
tbl = np.array(values, dtype=list(zip(cols, ['U250'] * len(cols))))
convertedvals = []
for c in cols:
i = tbl[c] == u''
if len(tbl[c][i]) > 0:
gm('Column %s has %s empty cells, will be parsed as float.' %
(c, len(tbl[c][i])))
if dtypes[c] in [float, int]:
dtypes[c] = float
tbl[c][i] = 'nan'
# actual type conversion
convertedvals += [np.array(tbl[c], dtype=dtypes[c])]
# now properly make it
tbl = np.array(list(zip(*convertedvals)),
dtype=[(c, dtypes[c]) for c in cols])
tbl.sort()
return tbl
if __name__ == '__main__':
# start time
st = dt.datetime.now()
# print version/date before doing anything else
    if mswim and '-v' in sys.argv:
        mswim.utils.print_version(__file__)
# get options and flags
o, f = grass.parser()
fmt = lambda d: '\n'.join(['%s: %s' % (k, v) for k, v in d.items()])+'\n'
grass.message('GIS Environment:\n'+fmt(grass.gisenv()))
grass.message('Parameters:\n'+fmt(o)+fmt(f))
# warn if MASKED
if 'MASK' in grass.list_grouped('rast')[grass.gisenv()['MAPSET']]:
maskcells = gread('r.stats', input='MASK', flags='nc').split()[1]
grass.message('!!! MASK active with %s cells, will only process those !!!'
% maskcells)
# send all to main
keywords = o
keywords.update(f)
main = main(**keywords)
main.execute()
# report time it took
delta = dt.datetime.now() - st
grass.message('Execution took %s hh:mm:ss' % delta)
| mwort/m.swim | m.swim.subbasins/m.swim.subbasins.py | Python | mit | 41,603 |
import sys
import errno
import m3u8
import urllib2
from Crypto.Cipher import AES
import StringIO
import socket
import os
blocksize = 16384
class resumable_fetch:
def __init__(self, uri, cur, total):
self.uri = uri
self.cur = cur
self.total = total
self.offset = 0
self._restart()
self.file_size = int(self.stream.info().get('Content-Length', -1))
if self.file_size <= 0:
print "Invalid file size"
sys.exit()
def _progress(self):
sys.stdout.write('\r%d/%d' % (self.cur, self.total))
sys.stdout.flush()
def _restart(self):
req = urllib2.Request(self.uri)
if self.offset:
req.headers['Range'] = 'bytes=%s-' % (self.offset, )
while True:
try:
self.stream = urllib2.urlopen(req, timeout = 30)
break
except socket.timeout:
continue
            except socket.error as e:
                if e.errno != errno.ECONNRESET:
                    raise
def read(self, n):
buffer = []
while self.offset < self.file_size:
try:
data = self.stream.read(min(n, self.file_size - self.offset))
self.offset += len(data)
n -= len(data)
buffer.append(data)
if n == 0 or data:
break
except socket.timeout:
self._progress()
self._restart()
except socket.error as e:
if e.errno != errno.ECONNRESET:
raise
self._progress()
self._restart()
return "".join(buffer)
def copy_with_decrypt(input, output, key):
iv = str(key.iv)[2:]
aes = AES.new(key.key_value, AES.MODE_CBC, iv.decode('hex'))
while True:
data = input.read(blocksize)
if not data:
break
output.write(aes.decrypt(data))
def fetch_streams(output, video):
output = open(output, 'wb')
for n, seg in enumerate(video.segments):
sys.stdout.write('\r%d/%d' % (n + 1, len(video.segments)))
sys.stdout.flush()
raw = resumable_fetch(seg.uri, n+1, len(video.segments))
copy_with_decrypt(raw, output, video.key)
size = output.tell()
if size % 188 != 0:
size = size // 188 * 188
output.seek(size)
output.truncate(size)
print '\n'
def fetch_encryption_key(video):
assert video.key.method == 'AES-128'
video.key.key_value = urllib2.urlopen(url = video.key.uri).read()
def find_best_video(uri):
playlist = m3u8.load(uri)
if not playlist.is_variant:
return playlist
best_stream = playlist.playlists[0]
for stream in playlist.playlists:
if stream.stream_info.bandwidth == 'max' or stream.stream_info.bandwidth > best_stream.stream_info.bandwidth:
best_stream = stream
return find_best_video(best_stream.absolute_uri)
def video_hls(uri, output):
video = find_best_video(uri)
fetch_encryption_key(video)
fetch_streams(output, video)
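# Example usage sketch (playlist URL and output path are illustrative):
#
#     video_hls('http://example.com/stream/master.m3u8', 'episode.ts')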
| Kamekameha/crunchy-xml-decoder | crunchy-xml-decoder/hls.py | Python | gpl-2.0 | 3,161 |
from django.conf import settings
from django.db import models
from adhocracy4.categories.form_fields import IconChoiceField
from adhocracy4.modules import models as module_models
class IconField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 254
kwargs['default'] = ''
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
"""Initialize icon choices from the settings if they exist."""
if hasattr(settings, 'A4_CATEGORY_ICONS'):
self.choices = settings.A4_CATEGORY_ICONS
# Call the super method at last so that choices are already initialized
super().contribute_to_class(cls, name, **kwargs)
def formfield(self, **kwargs):
form_class = kwargs.get('choices_form_class', IconChoiceField)
kwargs['choices_form_class'] = form_class
return super().formfield(**kwargs)
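# Hypothetical settings sketch: A4_CATEGORY_ICONS is expected to be a
# Django-style choices sequence of (value, label) pairs, e.g.:
#
#     A4_CATEGORY_ICONS = (
#         ('diamond', 'Diamond'),
#         ('flag', 'Flag'),
#     )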
class Category(models.Model):
name = models.CharField(max_length=120)
icon = IconField()
module = models.ForeignKey(
module_models.Module,
on_delete=models.CASCADE,
)
class Meta:
verbose_name_plural = 'categories'
def __str__(self):
return self.name
| liqd/adhocracy4 | adhocracy4/categories/models.py | Python | agpl-3.0 | 1,280 |
from param_definition.parameter import Parameter
from cgi import FieldStorage
import Cookie
import base64
import os
from model.user_model import User
from model.db_session import DB_Session_Factory
import uuid
from datetime import datetime
from lib.conf import CFG
import sys
import json
class HTTP_Response_Builder(object):
content_type = 'application/json'
params_dump = {}
requires_authentication = True
required_admin_permissions = 0x0
    def __init__(self, params_storage):
        # use a per-instance dict so parameter dumps don't leak across instances
        self.params_dump = {}
        try:
            for key in params_storage.keys():
                self.params_dump[key] = params_storage[key].value
except TypeError:
# FieldStorage throws a type error when trying to iterate it if no params are passed in... wtf
pass
for param_name in dir(self):
param_definition = getattr(self, param_name)
if not isinstance(param_definition, Parameter):
continue
param_value = param_definition.get_value(params_storage.getvalue(param_definition.name))
setattr(self, param_name, param_value)
def get_authenticated_user(self):
if not CFG.get_instance().is_live():
db_session = DB_Session_Factory.get_db_session()
return db_session.query(User).get("vova@box.com")
try:
cookie_string = os.environ['HTTP_COOKIE']
cookie = Cookie.SimpleCookie(cookie_string)
session_cookie = cookie['session_id'].value
except (Cookie.CookieError, KeyError):
session_cookie = None
return User.user_for_session_cookie(session_cookie)
def is_user_not_authorized(self, authenticated_user):
return authenticated_user is not None and (authenticated_user.admin_permissions & self.required_admin_permissions) != self.required_admin_permissions
def print_headers(self, authenticated_user):
if (authenticated_user is None and self.requires_authentication) or self.is_user_not_authorized(authenticated_user):
print "Status: 403"
print "Content-Type: application/json"
else:
print "Status: 200"
print "Content-Type: " + self.content_type
def get_authentication_needed_json(self):
result = {'error' : 'authn_needed'}
authn_request = """\
<?xml version="1.0" encoding="UTF-8"?>
<saml2p:AuthnRequest xmlns:saml2p="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://onsite-inflight.com/api/post-sso" Destination="https://box.okta.com/app/template_saml_2_0/k5jimobgREMCSHKGRLVB/sso/saml" ForceAuthn="false" ID="%s" IsPassive="false" IssueInstant="%s" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0">
<saml2:Issuer xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion">http://onsite-inflight.com/api/post-sso</saml2:Issuer>
<saml2p:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified" SPNameQualifier="http://onsite-inflight.com/api/post-sso"/>
</saml2p:AuthnRequest>
""" % (uuid.uuid4().hex, datetime.now().isoformat())
result['authn_request'] = base64.b64encode(authn_request)
        return result
def print_authentication_needed_response(self):
print json.dumps(self.get_authentication_needed_json())
def print_body_for_user(self, authenticated_user):
pass
def print_body(self, authenticated_user):
authenticated_user = self.get_authenticated_user()
if self.requires_authentication is True and authenticated_user is None:
self.print_authentication_needed_response()
else:
if self.is_user_not_authorized(authenticated_user):
result = {'error' : 'not_authorized', 'msg' : authenticated_user.email + " is not authorized to perform this action."}
print json.dumps(result)
else:
self.print_body_for_user(authenticated_user)
def print_response(self):
authenticated_user = self.get_authenticated_user()
self.print_headers(authenticated_user)
print
self.print_body(authenticated_user)
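# A hypothetical subclass sketch (endpoint name and payload are illustrative):
#
#     class Ping_Response_Builder(HTTP_Response_Builder):
#         requires_authentication = False
#
#         def print_body_for_user(self, authenticated_user):
#             print json.dumps({'pong': True})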
| vovagalchenko/onsite-inflight | api/http_response_builder/http_response_builder.py | Python | mit | 4,184 |
# Django settings for RGT project.
import os
import django
from ownsettings import *
projectPath = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../'))
DENDROGRAM_FONT_LOCATION = projectPath + '/src/RGT/LiberationSans-Regular.ttf'
HOST_NAME = 'localhost'
EMAIL_VERIFICATION = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': projectPath + '/src/sqlite.db',
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
projectPath + '/static',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'USE_YOUR_OWN'
# List of callables that know how to import templates from various sources.
# TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# # 'django.template.loaders.eggs.Loader',
# )
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
projectPath + '/templates'
# insert your TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
}
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'RGT.urls'
# TEMPLATE_DIRS = (
# # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# # Always use forward slashes, even on Windows.
# # Don't forget to use absolute paths, not relative paths.
# projectPath + '/templates',
# )
#
# TEMPLATE_CONTEXT_PROCESSORS = (
# "django.contrib.auth.context_processors.auth",
# "django.core.context_processors.debug",
# "django.core.context_processors.i18n",
# "django.core.context_processors.media",
# "django.core.context_processors.static",
# "django.core.context_processors.tz",
# "django.contrib.messages.context_processors.messages"
# )
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.admindocs',
'RGT.userProfile',
'RGT.authentication',
'RGT.gridMng',
'RGT.contact',
#'south',
#'RGT.functional_tests',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'file': {
'level' : 'DEBUG',
'class': 'logging.FileHandler',
'formatter': 'verbose',
'filename': projectPath + '/log.log'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'file'],
'level': 'WARNING'
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# authentication backend settings
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', 'RGT.authentication.backendAuthentication.BackendAuthentication')
AUTH_PROFILE_MODULE = 'userProfile.UserProfile'
######## RGT specific variables ########
GRID_USID_KEY_LENGTH= 20
SESSION_USID_KEY_LENGTH= 20
django.setup()
| danrg/RGT-tool | src/RGT/settings.py | Python | mit | 7,678 |
#!bin/python
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request, Response
import urllib2
import xml.dom.minidom
import datetime
import json
import copy
from dateutil import parser
import csv
import StringIO
import re
from meteoalarm import get_weather_alarms
from ipma import get_weather_forecasted_pt
from weather_observed import get_weather_observed
postal_codes = {
'47001': '47186',
'28001': '28079',
'39001': '39075',
'34001': '34120',
'34200': '34023',
'05194': '05123',
'33300': '33076',
'41001': '41091',
'46005': '46250'
}
localities = {
'Valladolid': '47186',
'Madrid': '28079',
'Santander': '39075',
'Palencia': '34120',
u'Venta de Baños': '34023',
'Mediana de Voltoya': '05123',
'Villaviciosa': '33076',
'Sevilla': '41091',
'Valencia': '46250'
}
app = Flask(__name__)
aemet_service = "http://www.aemet.es/xml/municipios/localidad_{}.xml"
@app.route('/')
def index():
return "Hello, World!"
@app.route('/v2/entities', methods=['GET'])
def get_weather():
entity_type = request.args.get('type')
if entity_type == 'WeatherForecast':
return get_weather_forecasted(request)
elif entity_type == 'WeatherObserved':
return get_weather_observed(request)
elif entity_type == 'WeatherAlarm':
return get_weather_alarms(request)
else:
return Response(json.dumps([]), mimetype='application/json')
def get_data(row, index, conversion=float, factor=1.0):
out = None
value = row[index]
    if value != '':
out = conversion(value) / factor
return out
def get_weather_forecasted(request):
country = ''
postal_code = ''
address_locality = ''
query = request.args.get('q')
if not query:
return Response(json.dumps([]), mimetype='application/json')
tokens = query.split(';')
for token in tokens:
items = token.split(':')
if items[0] == 'postalCode':
postal_code = items[1]
elif items[0] == 'country':
country = items[1]
elif items[0] == 'addressLocality':
address_locality = items[1]
if country == 'PT' and address_locality:
return Response(json.dumps(get_weather_forecasted_pt(address_locality)), mimetype='application/json')
    if not country or country != 'ES' or (postal_code not in postal_codes and address_locality not in localities):
return Response(json.dumps([]), mimetype='application/json')
param = ''
if postal_code:
param = postal_codes[postal_code]
elif address_locality:
param = localities[address_locality]
source = aemet_service.format(param)
req = urllib2.Request(url=source)
f = urllib2.urlopen(req)
xml_data = f.read()
DOMTree = xml.dom.minidom.parseString(xml_data).documentElement
address_locality = DOMTree.getElementsByTagName('nombre')[0].firstChild.nodeValue
address = { }
address['addressCountry'] = country
address['postalCode'] = postal_code
address['addressLocality'] = address_locality
created = DOMTree.getElementsByTagName('elaborado')[0].firstChild.nodeValue
forecasts = DOMTree.getElementsByTagName('prediccion')[0].getElementsByTagName('dia')
out = []
for forecast in forecasts:
date = forecast.getAttribute('fecha')
normalizedForecast = parse_aemet_forecast(forecast, date)
counter = 1
for f in normalizedForecast:
f['type'] = 'WeatherForecast'
f['id'] = generate_id(postal_code, country, date) + '_' + str(counter)
f['address'] = address
f['dateCreated'] = created
f['source'] = source
counter+=1
out.append(f)
return Response(json.dumps(out), mimetype='application/json')
def parse_aemet_forecast(forecast, date):
periods = { }
out = []
parsed_date = parser.parse(date)
pops = forecast.getElementsByTagName('prob_precipitacion')
for pop in pops:
period = pop.getAttribute('periodo')
if not period:
period = '00-24'
if pop.firstChild and pop.firstChild.nodeValue:
insert_into_period(periods, period,
'precipitationProbability', float(pop.firstChild.nodeValue) / 100.0)
period = None
weather_types = forecast.getElementsByTagName('estado_cielo')
for weather_type in weather_types:
period = weather_type.getAttribute('periodo')
if not period:
period = '00-24'
if weather_type.firstChild and weather_type.firstChild.nodeValue:
insert_into_period(periods, period, 'weatherType',
weather_type.getAttribute('descripcion'))
period = None
wind_data = forecast.getElementsByTagName('viento')
for wind in wind_data:
period = wind.getAttribute('periodo')
if not period:
period = '00-24'
wind_direction = wind.getElementsByTagName('direccion')[0]
wind_speed = wind.getElementsByTagName('velocidad')[0]
if wind_speed.firstChild and wind_speed.firstChild.nodeValue:
insert_into_period(periods, period, 'windSpeed',
int(wind_speed.firstChild.nodeValue))
if wind_direction.firstChild and wind_direction.firstChild.nodeValue:
insert_into_period(periods, period, 'windDirection',
wind_direction.firstChild.nodeValue)
temperature_node = forecast.getElementsByTagName('temperatura')[0]
max_temp = float(temperature_node.getElementsByTagName('maxima')[0].firstChild.nodeValue)
min_temp = float(temperature_node.getElementsByTagName('minima')[0].firstChild.nodeValue)
get_parameter_data(temperature_node, periods, 'temperature')
temp_feels_node = forecast.getElementsByTagName('sens_termica')[0]
max_temp_feels = float(temp_feels_node.getElementsByTagName('maxima')[0].firstChild.nodeValue)
min_temp_feels = float(temp_feels_node.getElementsByTagName('minima')[0].firstChild.nodeValue)
get_parameter_data(temp_feels_node, periods, 'feelsLikeTemperature')
humidity_node = forecast.getElementsByTagName('humedad_relativa')[0]
max_humidity = float(humidity_node.getElementsByTagName('maxima')[0].firstChild.nodeValue) / 100.0
min_humidity = float(humidity_node.getElementsByTagName('minima')[0].firstChild.nodeValue) / 100.0
get_parameter_data(humidity_node, periods, 'relativeHumidity', 100.0)
for period in periods:
period_items = period.split('-')
period_start = period_items[0]
period_end = period_items[1]
end_hour = int(period_end)
end_date = copy.deepcopy(parsed_date)
if end_hour > 23:
end_hour = 0
end_date = parsed_date + datetime.timedelta(days=1)
start_date = parsed_date.replace(hour=int(period_start), minute=0, second=0)
end_date = end_date.replace(hour=end_hour,minute=0,second=0)
objPeriod = periods[period]
objPeriod['validity'] = { }
objPeriod['validity']['from'] = start_date.isoformat()
objPeriod['validity']['to'] = end_date.isoformat()
maximum = { }
objPeriod['dayMaximum'] = maximum
minimum = { }
objPeriod['dayMinimum'] = minimum
maximum['temperature'] = max_temp
minimum['temperature'] = min_temp
maximum['relativeHumidity'] = max_humidity
minimum['relativeHumidity'] = min_humidity
maximum['feelsLikeTemperature'] = max_temp_feels
minimum['feelsLikeTemperature'] = min_temp_feels
out.append(objPeriod)
return out
def get_parameter_data(node, periods, parameter, factor=1.0):
param_periods = node.getElementsByTagName('dato')
for param in param_periods:
hour_str = param.getAttribute('hora')
hour = int(hour_str)
interval_start = hour - 6
interval_start_str = str(interval_start)
if interval_start < 10:
interval_start_str = '0' + str(interval_start)
period = interval_start_str + '-' + hour_str
if param.firstChild and param.firstChild.nodeValue:
param_val = float(param.firstChild.nodeValue)
insert_into_period(periods, period, parameter, param_val / factor)
def insert_into_period(periods, period, attribute, value):
if not period in periods:
periods[period] = { }
periods[period][attribute] = value
def generate_id(postal_code, country, date):
return postal_code + '_' + country + '_' + date
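# Example request sketch against the Flask service below (host and port match
# the defaults in __main__; the query values are illustrative):
#
#     curl 'http://localhost:1028/v2/entities?type=WeatherForecast&q=country:ES;postalCode:47001'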
if __name__ == '__main__':
app.run(host='0.0.0.0',port=1028,debug=True)
| sergg75/dataModels | Weather/WeatherForecast/harvest/aemet.py | Python | mit | 8,482 |
from font import font
from qt import QFont
class b( font ):
"""
<b> makes text bold.
<p>
<b>Properties:</b>
<br>
See <a href="font.html"><font></a> for properties.
"""
def __init__( self, *args ):
"""
        Initialize the container, contents, and properties.
        -*args, arguments for the font constructor.
"""
        font.__init__( self, *args )
self.setWeight( QFont.Bold )
def getHtml( self ):
"""
Get the HTML associated with this object.
Returns a list of html strings, with each entry being a line
in a html file.
"""
return [ "<b>" ] + font.getHtml( self ) + [ "</b>" ]
| derekmd/opentag-presenter | tags/b.py | Python | bsd-2-clause | 619 |
###############################################################################
# volumina: volume slicing and editing library
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
"""High-level API.
"""
from pixelpipeline.imagepump import ImagePump
from volumina.pixelpipeline.datasources import *
from volumina.layer import *
from volumina.layerstack import LayerStackModel
from volumina.widgets.layerwidget import LayerWidget
# Do NOT import these here because they prevent the volumina.NO3D flag from working properly
#from volumina.volumeEditorWidget import VolumeEditorWidget
#from volumina.volumeEditor import VolumeEditor
from volumina.viewer import Viewer, ClickableSegmentationLayer
from PyQt4.QtGui import QApplication
import sys
def viewerApp():
app = QApplication(sys.argv)
v = Viewer()
return (v, app)
| jakirkham/volumina | volumina/api.py | Python | lgpl-3.0 | 1,718 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Use the new Cartopy WMTS capabilities to plot some MODIS data
# <codecell>
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from owslib.wmts import WebMapTileService
# <codecell>
url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
wmts = WebMapTileService(url)
# <codecell>
modis_layers = [s for s in sorted(wmts.contents) if 'MODIS' in s]
# <codecell>
modis_layers
# <codecell>
layer = 'MODIS_Terra_CorrectedReflectance_TrueColor'
# <codecell>
plt.figure(figsize=(12,8))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_wmts(url, layer)
#ax.set_extent((-15, 25, 35, 60))
ax.set_extent((10, 40, 35, 50))
plt.title(layer)
plt.show()
# <codecell>
# <codecell>
| rsignell-usgs/notebook | Cartopy_WMTS_test.py | Python | mit | 785 |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.random_forest import H2ORandomForestEstimator
import random
import copy
def weights_vi():
###### create synthetic dataset1 with 3 predictors: p1 predicts response ~90% of the time, p2 ~70%, p3 ~50%
response = ['a'] * 10000 + ['b'] * 10000
p1 = [(1 if random.uniform(0,1) < 0.9 else 0) if y == 'a' else (0 if random.uniform(0,1) < 0.9 else 1) for y in response]
p2 = [(1 if random.uniform(0,1) < 0.7 else 0) if y == 'a' else (0 if random.uniform(0,1) < 0.7 else 1) for y in response]
p3 = [(1 if random.uniform(0,1) < 0.5 else 0) if y == 'a' else (0 if random.uniform(0,1) < 0.5 else 1) for y in response]
dataset1_python = [response, p1, p2, p3]
dataset1_h2o = h2o.H2OFrame(dataset1_python)
dataset1_h2o.set_names(["response", "p1", "p2", "p3"])
##### create synthetic dataset2 with 3 predictors: p3 predicts response ~90% of the time, p1 ~70%, p2 ~50%
p1 = [(1 if random.uniform(0,1) < 0.7 else 0) if y == 'a' else (0 if random.uniform(0,1) < 0.7 else 1) for y in response]
p2 = [(1 if random.uniform(0,1) < 0.5 else 0) if y == 'a' else (0 if random.uniform(0,1) < 0.5 else 1) for y in response]
p3 = [(1 if random.uniform(0,1) < 0.9 else 0) if y == 'a' else (0 if random.uniform(0,1) < 0.9 else 1) for y in response]
dataset2_python = [response, p1, p2, p3]
dataset2_h2o = h2o.H2OFrame(dataset2_python)
dataset2_h2o.set_names(["response", "p1", "p2", "p3"])
##### compute variable importances on dataset1 and dataset2
model_dataset1 = H2ORandomForestEstimator()
model_dataset1.train(x=["p1", "p2", "p3"], y="response", training_frame=dataset1_h2o)
varimp_dataset1 = tuple([p[0] for p in model_dataset1.varimp()])
assert varimp_dataset1 == ('p1', 'p2', 'p3'), "Expected the following relative variable importance on dataset1: " \
"('p1', 'p2', 'p3'), but got: {0}".format(varimp_dataset1)
model_dataset2 = H2ORandomForestEstimator()
model_dataset2.train(x=["p1", "p2", "p3"], y="response", training_frame=dataset2_h2o)
varimp_dataset2 = tuple([p[0] for p in model_dataset2.varimp()])
assert varimp_dataset2 == ('p3', 'p1', 'p2'), "Expected the following relative variable importance on dataset2: " \
"('p3', 'p1', 'p2'), but got: {0}".format(varimp_dataset2)
############ Test1 #############
##### weight the combined dataset 80/20 in favor of dataset 1
dataset1_python_weighted = copy.deepcopy(dataset1_python) + [[.8] * 20000]
dataset2_python_weighted = copy.deepcopy(dataset2_python) + [[.2] * 20000]
##### combine dataset1 and dataset2
combined_dataset_python = [dataset1_python_weighted[i] + dataset2_python_weighted[i] for i in range(len(dataset1_python_weighted))]
combined_dataset_h2o = h2o.H2OFrame(combined_dataset_python)
combined_dataset_h2o.set_names(["response", "p1", "p2", "p3", "weights"])
##### recompute the variable importances. the relative order should be the same as above.
model_combined_dataset = H2ORandomForestEstimator()
model_combined_dataset.train(x=["p1", "p2", "p3"],
y="response",
training_frame=combined_dataset_h2o,
weights_column="weights")
varimp_combined = tuple([p[0] for p in model_combined_dataset.varimp()])
assert varimp_combined == ('p1', 'p2', 'p3'), "Expected the following relative variable importance on the combined " \
"dataset: ('p1', 'p2', 'p3'), but got: {0}".format(varimp_combined)
############ Test2 #############
##### weight the combined dataset 80/20 in favor of dataset 2
dataset1_python_weighted = copy.deepcopy(dataset1_python) + [[.2] * 20000]
dataset2_python_weighted = copy.deepcopy(dataset2_python) + [[.8] * 20000]
##### combine dataset1 and dataset2
combined_dataset_python = [dataset1_python_weighted[i] + dataset2_python_weighted[i] for i in range(len(dataset1_python_weighted))]
combined_dataset_h2o = h2o.H2OFrame(combined_dataset_python)
combined_dataset_h2o.set_names(["response", "p1", "p2", "p3", "weights"])
##### recompute the variable importances. the relative order should be the same as above.
model_combined_dataset = H2ORandomForestEstimator()
model_combined_dataset.train(x=["p1", "p2", "p3"],
y="response",
training_frame=combined_dataset_h2o,
weights_column="weights")
varimp_combined = tuple([p[0] for p in model_combined_dataset.varimp()])
assert varimp_combined == ('p3', 'p1', 'p2'), "Expected the following relative variable importance on the combined " \
"dataset: ('p3', 'p1', 'p2'), but got: {0}".format(varimp_combined)
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_vi)
else:
weights_vi()
| madmax983/h2o-3 | h2o-py/tests/testdir_algos/rf/pyunit_weights_var_impRF.py | Python | apache-2.0 | 5,018 |
"""
Compute periods for the LINEAR data
-----------------------------------
"""
from __future__ import print_function
from time import time
import numpy as np
from astroML.datasets import fetch_LINEAR_sample
from astroML.time_series import lomb_scargle, multiterm_periodogram, \
search_frequencies
import sqlite3
Ngrid = 50000
DATABASE = 'periods.db'
data = fetch_LINEAR_sample()
# set up a database to hold periods
con = sqlite3.connect(DATABASE)
with con:
cur = con.cursor()
try:
cur.execute("CREATE TABLE Periods(id INT, omega FLOAT)")
    except sqlite3.OperationalError:
        # table already exists
        pass
for count, id in enumerate(data.ids):
# only compute period if it hasn't been computed before
cur.execute("SELECT * from Periods WHERE id = %i" % id)
res = cur.fetchall()
if len(res) > 0:
print(res[0])
else:
print("computing period for id = {0} ({1} / {2})"
"".format(id, count + 1, len(data.ids))))
lc = data[id]
t0 = time()
omega, power = search_frequencies(lc[:, 0], lc[:, 1], lc[:, 2],
LS_func=multiterm_periodogram,
n_save=5, n_retry=5,
n_eval=10000,
LS_kwargs=dict(n_terms=5))
omega_best = omega[np.argmax(power)]
t1 = time()
print(" - execution time: %.2g sec" % (t1 - t0))
# insert value and commit to disk
cur.execute("INSERT INTO Periods VALUES(%i, %f)"
% (id, omega_best))
con.commit()
con.close()
#cur.execute("SELECT * from Periods")
#print(cur.fetchall())
| nhuntwalker/astroML | book_figures/chapter10/compute_periods.py | Python | bsd-2-clause | 1,783 |
import pytz
from datetime import datetime
import time
import sys
import calendar
SECS_IN_MINUTE = 60
SECS_IN_HOURS = 60*SECS_IN_MINUTE
SECS_IN_DAYS = 24*SECS_IN_HOURS
class TimeHelpers:
@classmethod
def unix_time(cls,year,month,day,hour,minute,second,offset=0):
"""When it is midnight in London, it is 4PM in Seattle: The offset is eight hours. In order
to find the unix time of a local time in Seattle, take the unix time for the time in London.
Then, increase the unix time by eight hours. At this time, it is 4PM in Seattle. Because
Seattle is "behind" London, you will need to subtract the negative number in order to obtain
the unix time of the local number. Thus:
unix_time(local_time,offset) = london_unix_time(hours(local_time))-offset"""
return calendar.timegm( (year,month,day,hour,minute,second) ) - offset
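    # Worked example (illustrative values, not from the original module): for
    # Seattle, offset = -8 * SECS_IN_HOURS, so
    #     unix_time(2000, 1, 1, 16, 0, 0, offset=-8 * SECS_IN_HOURS)
    # returns calendar.timegm((2000, 1, 1, 16, 0, 0)) + 8 * SECS_IN_HOURS,
    # i.e. the unix time of 2000-01-02 00:00 UTC -- the same instant as 4PM
    # on 2000-01-01 in Seattle. localtime_to_unix(2000, 1, 1, 16, 0, 0,
    # 'America/Los_Angeles') below should agree, assuming pytz knows the zone.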
@classmethod
def localtime_to_unix(cls,year,month,day,hour,minute,second,timezone):
dt = pytz.timezone(timezone).localize(datetime(year,month,day,hour,minute,second)).astimezone(pytz.utc)
return calendar.timegm( (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) )
@classmethod
def datetime_to_unix(cls, dt):
dt = dt.astimezone(pytz.utc)
return calendar.timegm( (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) )
@classmethod
def create_localtime(cls,year,month,day,hour,minute,second,timezone):
return pytz.timezone(timezone).localize(datetime(year,month,day,hour,minute,second))
@classmethod
def unix_to_localtime(cls,unixtime, timezone):
tt = time.gmtime( unixtime )
dt = pytz.utc.localize(datetime(tt[0],tt[1],tt[2],tt[3],tt[4],tt[5]))
return dt.astimezone( pytz.timezone(timezone) )
@classmethod
def timedelta_to_seconds(cls,td):
return td.days*SECS_IN_DAYS+td.seconds+td.microseconds/1000000.0
@classmethod
def unixtime_to_daytimes(cls,unixtime,timezone):
dt = cls.unix_to_localtime(unixtime,timezone)
ret = dt.hour*3600+dt.minute*60+dt.second
return ret, ret+24*3600, ret+2*24*3600
def withProgress(seq, modValue=100):
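    """Yield items from seq, printing a running count every modValue items."""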
c = -1
for c, v in enumerate(seq):
if (c+1) % modValue == 0:
sys.stdout.write("%s\r" % (c+1))
sys.stdout.flush()
yield v
print("\nCompleted %s" % (c+1))
| graphserver/graphserver | pygs/graphserver/util.py | Python | bsd-3-clause | 2,477 |
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.5,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=partial(scaled_cost, loss_func=mdn_nll),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-03,
learning_rate_changes_by_iteration={
2000: 5e-04,
5000: 1e-04,
7000: 5e-05
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
plotter=MDNPlotter,
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
}
]
)
def exp_a(name):
# 5 appliances
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def exp_b(name):
# one pool layer
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_c(name):
# BLSTM
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('abc')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=10000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mmottahedi/neuralnilm_prototype | scripts/e294.py | Python | mit | 8,331 |
import json
import math
__author__ = 'apostol3'
class Map:
def __init__(self, w, h):
self.max_time = 120
self.size = (w, h)
self.walls = []
self.headline = []
self.cars = []
self.finish = []
self.objects = []
self.car_size = (1.8/2, 4.6/2)
def start_new_wall(self):
self.walls.append([])
def append_wall_point(self, x, y):
if x > self.size[0] or y > self.size[1]:
self.start_new_wall()
return
self.walls[-1].append((x, y))
def append_headline_point(self, x, y):
if x > self.size[0] or y > self.size[1]:
return
self.headline.append((x, y))
def create_car(self, x, y):
self.cars.append((x, y, 3 * math.pi / 2))
def append_finish_point(self, x, y):
if x > self.size[0] or y > self.size[1]:
            self.finish.clear()
            return  # discard the out-of-range point, like the other append_* methods
if len(self.finish) < 2:
self.finish.append((x, y))
else:
self.finish = [(x, y)]
@staticmethod
def open_from_file(file):
        with open(file, 'r') as f:
            doc = json.load(f)
size = doc['size']
map = Map(*size)
map.max_time = doc['max_time']
map.walls = doc['walls']
map.finish = doc['finish']
map.headline = doc['headline']
map.cars = doc['cars']
return map
def save_to_file(self, file):
        doc = {'size': self.size, 'max_time': self.max_time, 'finish': self.finish,
               'walls': self.walls, 'headline': self.headline, 'cars': self.cars}
        if len(doc['walls']) != 0 and len(doc['walls'][-1]) == 0:
            doc['walls'].pop()
        out_inf = json.dumps(doc, indent=4)
        with open(file, 'w') as f:
            f.write(out_inf)
| Apostol3/race_env_editor | map.py | Python | mit | 1,833 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-31 17:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('checkout', '0002_auto_20160724_1533'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.IntegerField(blank=True, choices=[(0, 'Aguardando Pagamento'), (1, 'Concluída'), (2, 'Cancelada')], default=0, verbose_name='Situação')),
('payment_option', models.CharField(choices=[('pagseguro', 'PagSeguro'), ('paypal', 'Paypal')], max_length=20, verbose_name='Opção de Pagamento')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuário')),
],
options={
'verbose_name_plural': 'Pedidos',
'verbose_name': 'Pedido',
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1, verbose_name='Quantidade')),
('price', models.DecimalField(decimal_places=2, max_digits=8, verbose_name='Preço')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='checkout.Order', verbose_name='Pedido')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='catalog.Product', verbose_name='Produto')),
],
options={
'verbose_name_plural': 'Itens dos pedidos',
'verbose_name': 'Item do pedido',
},
),
]
| gileno/djangoecommerce | checkout/migrations/0003_order_orderitem.py | Python | cc0-1.0 | 2,362 |
from datetime import datetime
from ..controllers import post_controller
from ..models.comment import Comment
def add(request):
comment = Comment()
if request.session['current_user'] is not None:
comment.author_id = int(request.session['current_user'])
else:
comment.authorEmail = request.POST['email']
comment.addedOn = datetime.now()
comment.content = request.POST['content']
comment.post_id = int(request.POST['post'])
    if int(request.POST['comment']) != 0:
comment.parentComment_id = int(request.POST['comment'])
comment.save()
return post_controller.single(comment.post_id)
| jmescuderojustel/codeyourblogin-python-django-1.7 | src/blog/controllers/comment_controller.py | Python | mit | 644 |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
a = 0.5
k = 7
d = 8
def f(t):
# This is the function that we want to plot
F = (a + np.cos(k / d * t))
x = F * np.cos(t)
y = F * np.sin(t)
return x, y
def f(t):
# This is another function that we want to plot
x = np.sin(t) * np.cos(-7*t/16) - 0.25 * np.cos(t) * np.sin(-7*t/16)
y = np.sin(t) * np.sin(-7*t/16) - 0.25 * np.cos(t) * np.cos(-7*t/16)
return x, y
# Define the t value
t = 0
# get initial function values
x, y = f(t)
# Initialize the figure environment
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')
r = np.sqrt(x**2+y**2)
r = 1
ax.axis([-r,r,-r,r])
ax.grid(True)
# Get the line object from the figure
line, = ax.plot(x, y, 'm-')
def updatefig(*args):
# Update the plot for the next image
    global t  # Allow editing t in the global scope
    # update t
    t += 0.03
# change the data on the line
x, y = f(t)
X = np.append(line.get_xdata(), x)
Y = np.append(line.get_ydata(), y)
line.set_xdata(X)
line.set_ydata(Y)
# redraw the plot
fig.canvas.draw()
    # send back the line as a tuple of artists, as required when blit=True
return line,
# Create the animation
ani = animation.FuncAnimation(fig, updatefig, interval=3, blit=True)
# To save, requires libav-tools to be installed
#ani.save('name.mp4', writer='avconv')
plt.show()
| robertsj/ME701_examples | plots/animations/2D_ani_plot.py | Python | mit | 1,449 |
# -*- coding: utf-8 -*-
"""Single-elimination cups."""
# Copyright (C) 2015, 2016, 2017 Alexander Jones
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
from competitions.cup import StandardCup, init_nested_list
class SingleEliminationCup(StandardCup):
"""Single-elimination cup."""
    def __init__(self, match_class, rounds=0, teams=None):
"""Constructor.
@param match_class: The match simulator class
@type match_class: Match
@param rounds: The number of rounds
@type rounds: int
@param teams: An optional list of teams
@type teams: list
"""
        teams = teams if teams is not None else []
        super(SingleEliminationCup, self).__init__(match_class=match_class,
teams=teams,
team_count=2 ** rounds,
rounds=rounds)
def update_teams(self, teams):
"""Update the list of teams and the first-round matches.
@param teams: The new list of teams
@type teams: list
"""
self.teams = teams
first_round = self.matches[0]
for x in range(len(first_round)):
first_round[x].team1 = self.teams[x * 2]
first_round[x].team2 = self.teams[x * 2 + 1]
def _build_bracket(self):
"""Build the nested list representing the bracket."""
Match = self.MatchClass
match_count = self.team_count // 2
self.matches.append([Match(self.teams[i * 2], self.teams[i * 2 + 1])
for i in range(match_count)])
match_num = 1
match_count //= 2
match_num = self._build_second_round(match_count, match_num)
for __ in range(2, self.round_count):
match_count //= 2
round = []
for ___ in range(match_count):
round.append(Match('Match {} Winner'.format(match_num),
'Match {} Winner'.format(match_num + 1)))
match_num += 2
self.matches.append(round)
def _build_second_round(self, match_count, match_num):
"""Build the bracket's second round."""
raise NotImplementedError
def _assign_winner(self, winner):
"""Assign winner to their next match."""
next_match = self.matches[self.index[0] + 1][self.index[1] // 2]
if self.index[1] % 2 == 0:
next_match.team1 = winner
else:
next_match.team2 = winner
def _generate_layout(self):
"""Generate the bracket layout for display."""
line_count = self.team_count * 2 - 1
layout = init_nested_list(line_count)
first_team = True
match_gen = self._match_for_layout
for round in range(self.round_count):
match_num = 0
div = 2 * 2 ** round
mod = div // 2 - 1
for i in range(line_count):
(conf, layout_entry) = match_gen((i % div == mod),
round, match_num,
first_team)
layout[i].append(layout_entry)
(match_num, first_team) = conf
return layout
class StandardSingleEliminationCup(SingleEliminationCup):
"""Standard single-elimination cup."""
def _build_second_round(self, match_count, match_num):
"""Build the bracket's second round."""
Match = self.MatchClass
round = []
for ___ in range(match_count):
teams = []
for i in range(2):
previous_match = self.matches[0][match_num + i - 1]
if not previous_match.is_walkover:
teams.append('Match {} Winner'.format(match_num + i))
else:
teams.append(previous_match.team1 or previous_match.team2)
round.append(Match(teams[0], teams[1]))
match_num += 2
self.matches.append(round)
return match_num
def _set_current_match(self):
"""Set the current match."""
super(StandardSingleEliminationCup, self)._set_current_match()
while self.current_match.is_walkover:
super(StandardSingleEliminationCup, self)._set_current_match()
class PowerOfTwoSingleEliminationCup(SingleEliminationCup):
"""Standard single-elimination cup for powers of two (4, 8, 16, etc.)."""
def _build_second_round(self, match_count, match_num):
"""Build the bracket's second round."""
Match = self.MatchClass
round = []
for ___ in range(match_count):
round.append(Match('Match {} Winner'.format(match_num),
'Match {} Winner'.format(match_num + 1)))
match_num += 2
self.matches.append(round)
return match_num
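# Minimal usage sketch (illustrative only; ``MyMatch`` stands in for any
# match-simulator class accepted via ``match_class``, and the team names are
# invented):
#
#     cup = PowerOfTwoSingleEliminationCup(match_class=MyMatch, rounds=2,
#                                          teams=['A', 'B', 'C', 'D'])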
| happy5214/competitions-cup | competitions/cup/default/single_elimination.py | Python | lgpl-3.0 | 5,531 |
# maubot - A plugin-based Matrix bot system.
# Copyright (C) 2019 Tulir Asokan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from urllib.parse import quote
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import functools
import json
from colorama import Fore
import click
from ..config import get_token
from ..cliq import cliq
history_count: int = 10
enc = functools.partial(quote, safe="")
friendly_errors = {
"server_not_found": "Registration target server not found.\n\n"
"To log in or register through maubot, you must add the server to the\n"
"registration_secrets section in the config. If you only want to log in,\n"
"leave the `secret` field empty."
}
@cliq.command(help="Log into a Matrix account via the Maubot server")
@cliq.option("-h", "--homeserver", help="The homeserver to log into", required_unless="list")
@cliq.option("-u", "--username", help="The username to log in with", required_unless="list")
@cliq.option("-p", "--password", help="The password to log in with", inq_type="password",
required_unless="list")
@cliq.option("-s", "--server", help="The maubot instance to log in through", default="",
required=False, prompt=False)
@click.option("-r", "--register", help="Register instead of logging in", is_flag=True,
default=False)
@click.option("-l", "--list", help="List available homeservers", is_flag=True, default=False)
def auth(homeserver: str, username: str, password: str, server: str, register: bool, list: bool
) -> None:
server, token = get_token(server)
if not token:
return
headers = {"Authorization": f"Bearer {token}"}
if list:
url = f"{server}/_matrix/maubot/v1/client/auth/servers"
with urlopen(Request(url, headers=headers)) as resp_data:
resp = json.load(resp_data)
print(f"{Fore.GREEN}Available Matrix servers for registration and login:{Fore.RESET}")
for server in resp.keys():
print(f"* {Fore.CYAN}{server}{Fore.RESET}")
return
endpoint = "register" if register else "login"
headers["Content-Type"] = "application/json"
url = f"{server}/_matrix/maubot/v1/client/auth/{enc(homeserver)}/{endpoint}"
req = Request(url, headers=headers,
data=json.dumps({
"username": username,
"password": password,
}).encode("utf-8"))
try:
with urlopen(req) as resp_data:
resp = json.load(resp_data)
action = "registered" if register else "logged in as"
print(f"{Fore.GREEN}Successfully {action} "
f"{Fore.CYAN}{resp['user_id']}{Fore.GREEN}.")
print(f"{Fore.GREEN}Access token: {Fore.CYAN}{resp['access_token']}{Fore.RESET}")
print(f"{Fore.GREEN}Device ID: {Fore.CYAN}{resp['device_id']}{Fore.RESET}")
except HTTPError as e:
try:
err_data = json.load(e)
error = friendly_errors.get(err_data["errcode"], err_data["error"])
except (json.JSONDecodeError, KeyError):
error = str(e)
action = "register" if register else "log in"
print(f"{Fore.RED}Failed to {action}: {error}{Fore.RESET}")
| tulir/maubot | maubot/cli/commands/auth.py | Python | agpl-3.0 | 3,944 |
"""This module contains the general information for BiosVfOSBootWatchdogTimer ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfOSBootWatchdogTimerConsts:
VP_OSBOOT_WATCHDOG_TIMER_DISABLED = "Disabled"
VP_OSBOOT_WATCHDOG_TIMER_ENABLED = "Enabled"
_VP_OSBOOT_WATCHDOG_TIMER_DISABLED = "disabled"
_VP_OSBOOT_WATCHDOG_TIMER_ENABLED = "enabled"
VP_OSBOOT_WATCHDOG_TIMER_PLATFORM_DEFAULT = "platform-default"
class BiosVfOSBootWatchdogTimer(ManagedObject):
"""This is BiosVfOSBootWatchdogTimer class."""
consts = BiosVfOSBootWatchdogTimerConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfOSBootWatchdogTimer", "biosVfOSBootWatchdogTimer", "OS-Boot-Watchdog-Timer-Param", VersionMeta.Version151f, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"]),
"modular": MoMeta("BiosVfOSBootWatchdogTimer", "biosVfOSBootWatchdogTimer", "OS-Boot-Watchdog-Timer-Param", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_os_boot_watchdog_timer": MoPropertyMeta("vp_os_boot_watchdog_timer", "vpOSBootWatchdogTimer", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_os_boot_watchdog_timer": MoPropertyMeta("vp_os_boot_watchdog_timer", "vpOSBootWatchdogTimer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpOSBootWatchdogTimer": "vp_os_boot_watchdog_timer",
"childAction": "child_action",
},
"modular": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpOSBootWatchdogTimer": "vp_os_boot_watchdog_timer",
"childAction": "child_action",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.status = None
self.vp_os_boot_watchdog_timer = None
self.child_action = None
ManagedObject.__init__(self, "BiosVfOSBootWatchdogTimer", parent_mo_or_dn, **kwargs)
| ragupta-git/ImcSdk | imcsdk/mometa/bios/BiosVfOSBootWatchdogTimer.py | Python | apache-2.0 | 3,929 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from heatclient import exc
from heatclient.v1 import stacks
import mock
from oslo_config import cfg
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import template_format
from heat.engine import environment
from heat.engine.resources.openstack.heat import remote_stack
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack
from heat.engine import template
from heat.tests import common as tests_common
from heat.tests import utils
cfg.CONF.import_opt('action_retry_limit', 'heat.common.config')
parent_stack_template = '''
heat_template_version: 2013-05-23
resources:
remote_stack:
type: OS::Heat::Stack
properties:
context:
region_name: RegionOne
template: { get_file: remote_template.yaml }
timeout: 60
parameters:
name: foo
'''
remote_template = '''
heat_template_version: 2013-05-23
parameters:
name:
type: string
resources:
resource1:
type: GenericResourceType
outputs:
foo:
value: bar
'''
bad_template = '''
heat_template_version: 2013-05-26
parameters:
name:
type: string
resources:
resource1:
type: UnknownResourceType
outputs:
foo:
value: bar
'''
def get_stack(stack_id='c8a19429-7fde-47ea-a42f-40045488226c',
stack_name='teststack', description='No description',
creation_time='2013-08-04T20:57:55Z',
updated_time='2013-08-04T20:57:55Z',
stack_status='CREATE_COMPLETE',
stack_status_reason='',
outputs=None):
action = stack_status[:stack_status.index('_')]
status = stack_status[stack_status.index('_') + 1:]
data = {
'id': stack_id,
'stack_name': stack_name,
'description': description,
'creation_time': creation_time,
'updated_time': updated_time,
'stack_status': stack_status,
'stack_status_reason': stack_status_reason,
'action': action,
'status': status,
'outputs': outputs or None,
}
return stacks.Stack(mock.MagicMock(), data)
class FakeClients(object):
def __init__(self, region_name=None):
self.region_name = region_name or 'RegionOne'
self.hc = None
self.plugin = None
def client(self, name):
if self.region_name in ['RegionOne', 'RegionTwo']:
if self.hc is None:
self.hc = mock.MagicMock()
return self.hc
else:
raise Exception('Failed connecting to Heat')
def client_plugin(self, name):
def examine_exception(ex):
if not isinstance(ex, exc.HTTPNotFound):
raise ex
if self.plugin is None:
self.plugin = mock.MagicMock()
self.plugin.ignore_not_found.side_effect = examine_exception
return self.plugin
class RemoteStackTest(tests_common.HeatTestCase):
def setUp(self):
super(RemoteStackTest, self).setUp()
self.this_region = 'RegionOne'
self.that_region = 'RegionTwo'
self.bad_region = 'RegionNone'
cfg.CONF.set_override('action_retry_limit', 0)
self.parent = None
self.heat = None
self.client_plugin = None
self.this_context = None
self.old_clients = None
def unset_clients_property():
type(self.this_context).clients = self.old_clients
self.addCleanup(unset_clients_property)
def initialize(self):
parent, rsrc = self.create_parent_stack(remote_region='RegionTwo')
self.parent = parent
self.heat = rsrc._context().clients.client("heat")
self.client_plugin = rsrc._context().clients.client_plugin('heat')
def create_parent_stack(self, remote_region=None, custom_template=None):
snippet = template_format.parse(parent_stack_template)
self.files = {
'remote_template.yaml': custom_template or remote_template
}
region_name = remote_region or self.this_region
props = snippet['resources']['remote_stack']['properties']
# context property is not required, default to current region
if remote_region is None:
del props['context']
else:
props['context']['region_name'] = region_name
if self.this_context is None:
self.this_context = utils.dummy_context(
region_name=self.this_region)
tmpl = template.Template(snippet, files=self.files)
parent = stack.Stack(self.this_context, 'parent_stack', tmpl)
# parent context checking
ctx = parent.context.to_dict()
self.assertEqual(self.this_region, ctx['region_name'])
self.assertEqual(self.this_context.to_dict(), ctx)
parent.store()
resource_defns = parent.t.resource_definitions(parent)
rsrc = remote_stack.RemoteStack(
'remote_stack_res',
resource_defns['remote_stack'],
parent)
# remote stack resource checking
self.assertEqual(60, rsrc.properties.get('timeout'))
remote_context = rsrc._context()
hc = FakeClients(rsrc._region_name)
if self.old_clients is None:
self.old_clients = type(remote_context).clients
type(remote_context).clients = mock.PropertyMock(return_value=hc)
return parent, rsrc
def create_remote_stack(self):
# This method default creates a stack on RegionTwo (self.other_region)
defaults = [get_stack(stack_status='CREATE_IN_PROGRESS'),
get_stack(stack_status='CREATE_COMPLETE')]
if self.parent is None:
self.initialize()
# prepare clients to return status
self.heat.stacks.create.return_value = {'stack': get_stack().to_dict()}
self.heat.stacks.get = mock.MagicMock(side_effect=defaults)
rsrc = self.parent['remote_stack']
scheduler.TaskRunner(rsrc.create)()
return rsrc
def test_create_remote_stack_default_region(self):
parent, rsrc = self.create_parent_stack()
self.assertEqual((rsrc.INIT, rsrc.COMPLETE), rsrc.state)
self.assertEqual(self.this_region, rsrc._region_name)
ctx = rsrc.properties.get('context')
self.assertIsNone(ctx)
self.assertIsNone(rsrc.validate())
def test_create_remote_stack_this_region(self):
parent, rsrc = self.create_parent_stack(remote_region=self.this_region)
self.assertEqual((rsrc.INIT, rsrc.COMPLETE), rsrc.state)
self.assertEqual(self.this_region, rsrc._region_name)
ctx = rsrc.properties.get('context')
self.assertEqual(self.this_region, ctx['region_name'])
self.assertIsNone(rsrc.validate())
def test_create_remote_stack_that_region(self):
parent, rsrc = self.create_parent_stack(remote_region=self.that_region)
self.assertEqual((rsrc.INIT, rsrc.COMPLETE), rsrc.state)
self.assertEqual(self.that_region, rsrc._region_name)
ctx = rsrc.properties.get('context')
self.assertEqual(self.that_region, ctx['region_name'])
self.assertIsNone(rsrc.validate())
def test_create_remote_stack_bad_region(self):
parent, rsrc = self.create_parent_stack(remote_region=self.bad_region)
self.assertEqual((rsrc.INIT, rsrc.COMPLETE), rsrc.state)
self.assertEqual(self.bad_region, rsrc._region_name)
ctx = rsrc.properties.get('context')
self.assertEqual(self.bad_region, ctx['region_name'])
ex = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
msg = ('Cannot establish connection to Heat endpoint '
'at region "%s"' % self.bad_region)
self.assertIn(msg, six.text_type(ex))
def test_remote_validation_failed(self):
parent, rsrc = self.create_parent_stack(remote_region=self.that_region,
custom_template=bad_template)
self.assertEqual((rsrc.INIT, rsrc.COMPLETE), rsrc.state)
self.assertEqual(self.that_region, rsrc._region_name)
ctx = rsrc.properties.get('context')
self.assertEqual(self.that_region, ctx['region_name'])
# not setting or using self.heat because this test case is a special
# one with the RemoteStack resource initialized but not created.
heat = rsrc._context().clients.client("heat")
# heatclient.exc.BadRequest is the exception returned by a failed
# validation
heat.stacks.validate = mock.MagicMock(side_effect=exc.HTTPBadRequest)
ex = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
msg = ('Failed validating stack template using Heat endpoint at region'
' "%s"') % self.that_region
self.assertIn(msg, six.text_type(ex))
def test_create(self):
rsrc = self.create_remote_stack()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('c8a19429-7fde-47ea-a42f-40045488226c',
rsrc.resource_id)
env = environment.get_child_environment(rsrc.stack.env,
{'name': 'foo'})
args = {
'stack_name': rsrc.physical_resource_name(),
'template': template_format.parse(remote_template),
'timeout_mins': 60,
'disable_rollback': True,
'parameters': {'name': 'foo'},
'files': self.files,
'environment': env.user_env_as_dict(),
}
self.heat.stacks.create.assert_called_with(**args)
self.assertEqual(2, len(self.heat.stacks.get.call_args_list))
def test_create_failed(self):
returns = [get_stack(stack_status='CREATE_IN_PROGRESS'),
get_stack(stack_status='CREATE_FAILED',
stack_status_reason='Remote stack creation '
'failed')]
        # Note: only this test case does an out-of-band initialization; most of
# the other test cases will have self.parent initialized.
if self.parent is None:
self.initialize()
self.heat.stacks.create.return_value = {'stack': get_stack().to_dict()}
self.heat.stacks.get = mock.MagicMock(side_effect=returns)
rsrc = self.parent['remote_stack']
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status CREATE_FAILED due to '
'"Remote stack creation failed"')
self.assertEqual(error_msg, six.text_type(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
def test_delete(self):
returns = [get_stack(stack_status='DELETE_IN_PROGRESS'),
get_stack(stack_status='DELETE_COMPLETE')]
rsrc = self.create_remote_stack()
self.heat.stacks.get = mock.MagicMock(side_effect=returns)
self.heat.stacks.delete = mock.MagicMock()
remote_stack_id = rsrc.resource_id
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.heat.stacks.delete.assert_called_with(stack_id=remote_stack_id)
def test_delete_already_gone(self):
rsrc = self.create_remote_stack()
self.heat.stacks.delete = mock.MagicMock(
side_effect=exc.HTTPNotFound())
self.heat.stacks.get = mock.MagicMock(side_effect=exc.HTTPNotFound())
remote_stack_id = rsrc.resource_id
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.heat.stacks.delete.assert_called_with(stack_id=remote_stack_id)
def test_delete_failed(self):
returns = [get_stack(stack_status='DELETE_IN_PROGRESS'),
get_stack(stack_status='DELETE_FAILED',
stack_status_reason='Remote stack deletion '
'failed')]
rsrc = self.create_remote_stack()
self.heat.stacks.get = mock.MagicMock(side_effect=returns)
self.heat.stacks.delete = mock.MagicMock()
remote_stack_id = rsrc.resource_id
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status DELETE_FAILED due to '
'"Remote stack deletion failed"')
self.assertIn(error_msg, six.text_type(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.heat.stacks.delete.assert_called_with(stack_id=remote_stack_id)
self.assertEqual(rsrc.resource_id, remote_stack_id)
def test_attribute(self):
rsrc = self.create_remote_stack()
outputs = [
{
'output_key': 'foo',
'output_value': 'bar'
}
]
created_stack = get_stack(stack_name='stack1', outputs=outputs)
self.heat.stacks.get = mock.MagicMock(return_value=created_stack)
self.assertEqual('stack1', rsrc.FnGetAtt('stack_name'))
self.assertEqual('bar', rsrc.FnGetAtt('outputs')['foo'])
self.heat.stacks.get.assert_called_with(
stack_id='c8a19429-7fde-47ea-a42f-40045488226c')
def test_attribute_failed(self):
rsrc = self.create_remote_stack()
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'non-existent_property')
self.assertEqual(
'The Referenced Attribute (remote_stack non-existent_property) is '
'incorrect.',
six.text_type(error))
def test_snapshot(self):
stacks = [get_stack(stack_status='SNAPSHOT_IN_PROGRESS'),
get_stack(stack_status='SNAPSHOT_COMPLETE')]
snapshot = {
'id': 'a29bc9e25aa44f99a9a3d59cd5b0e263',
'status': 'IN_PROGRESS'
}
rsrc = self.create_remote_stack()
self.heat.stacks.get = mock.MagicMock(side_effect=stacks)
self.heat.stacks.snapshot = mock.MagicMock(return_value=snapshot)
scheduler.TaskRunner(rsrc.snapshot)()
self.assertEqual((rsrc.SNAPSHOT, rsrc.COMPLETE), rsrc.state)
self.assertEqual('a29bc9e25aa44f99a9a3d59cd5b0e263',
rsrc.data().get('snapshot_id'))
self.heat.stacks.snapshot.assert_called_with(
stack_id=rsrc.resource_id)
def test_restore(self):
snapshot = {
'id': 'a29bc9e25aa44f99a9a3d59cd5b0e263',
'status': 'IN_PROGRESS'
}
remote_stack = mock.MagicMock()
remote_stack.action = 'SNAPSHOT'
remote_stack.status = 'COMPLETE'
parent, rsrc = self.create_parent_stack()
rsrc.action = rsrc.SNAPSHOT
heat = rsrc._context().clients.client("heat")
heat.stacks.snapshot = mock.MagicMock(return_value=snapshot)
heat.stacks.get = mock.MagicMock(return_value=remote_stack)
scheduler.TaskRunner(parent.snapshot, None)()
self.assertEqual((parent.SNAPSHOT, parent.COMPLETE), parent.state)
data = parent.prepare_abandon()
remote_stack_snapshot = {
'snapshot': {
'id': 'a29bc9e25aa44f99a9a3d59cd5b0e263',
'status': 'COMPLETE',
'data': {
'files': data['files'],
'environment': data['environment'],
'template': template_format.parse(
data['files']['remote_template.yaml'])
}
}
}
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, parent.id)
heat.stacks.snapshot_show = mock.MagicMock(
return_value=remote_stack_snapshot)
self.patchobject(rsrc, 'update').return_value = None
rsrc.action = rsrc.UPDATE
rsrc.status = rsrc.COMPLETE
remote_stack.action = 'UPDATE'
parent.restore(fake_snapshot)
self.assertEqual((parent.RESTORE, parent.COMPLETE), parent.state)
def test_check(self):
stacks = [get_stack(stack_status='CHECK_IN_PROGRESS'),
get_stack(stack_status='CHECK_COMPLETE')]
rsrc = self.create_remote_stack()
self.heat.stacks.get = mock.MagicMock(side_effect=stacks)
self.heat.actions.check = mock.MagicMock()
scheduler.TaskRunner(rsrc.check)()
self.assertEqual((rsrc.CHECK, rsrc.COMPLETE), rsrc.state)
self.heat.actions.check.assert_called_with(stack_id=rsrc.resource_id)
def test_check_failed(self):
returns = [get_stack(stack_status='CHECK_IN_PROGRESS'),
get_stack(stack_status='CHECK_FAILED',
stack_status_reason='Remote stack check failed')]
rsrc = self.create_remote_stack()
self.heat.stacks.get = mock.MagicMock(side_effect=returns)
self.heat.actions.resume = mock.MagicMock()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.check))
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status CHECK_FAILED due to '
'"Remote stack check failed"')
self.assertEqual(error_msg, six.text_type(error))
self.assertEqual((rsrc.CHECK, rsrc.FAILED), rsrc.state)
self.heat.actions.check.assert_called_with(stack_id=rsrc.resource_id)
def test_resume(self):
stacks = [get_stack(stack_status='RESUME_IN_PROGRESS'),
get_stack(stack_status='RESUME_COMPLETE')]
rsrc = self.create_remote_stack()
rsrc.action = rsrc.SUSPEND
self.heat.stacks.get = mock.MagicMock(side_effect=stacks)
self.heat.actions.resume = mock.MagicMock()
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
self.heat.actions.resume.assert_called_with(stack_id=rsrc.resource_id)
def test_resume_failed(self):
returns = [get_stack(stack_status='RESUME_IN_PROGRESS'),
get_stack(stack_status='RESUME_FAILED',
stack_status_reason='Remote stack resume failed')]
rsrc = self.create_remote_stack()
rsrc.action = rsrc.SUSPEND
self.heat.stacks.get = mock.MagicMock(side_effect=returns)
self.heat.actions.resume = mock.MagicMock()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.resume))
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status RESUME_FAILED due to '
'"Remote stack resume failed"')
self.assertEqual(error_msg, six.text_type(error))
self.assertEqual((rsrc.RESUME, rsrc.FAILED), rsrc.state)
self.heat.actions.resume.assert_called_with(stack_id=rsrc.resource_id)
def test_resume_failed_not_created(self):
self.initialize()
rsrc = self.parent['remote_stack']
rsrc.action = rsrc.SUSPEND
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.resume))
error_msg = ('Error: resources.remote_stack: '
'Cannot resume remote_stack, resource not found')
self.assertEqual(error_msg, six.text_type(error))
self.assertEqual((rsrc.RESUME, rsrc.FAILED), rsrc.state)
def test_suspend(self):
stacks = [get_stack(stack_status='SUSPEND_IN_PROGRESS'),
get_stack(stack_status='SUSPEND_COMPLETE')]
rsrc = self.create_remote_stack()
self.heat.stacks.get = mock.MagicMock(side_effect=stacks)
self.heat.actions.suspend = mock.MagicMock()
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
self.heat.actions.suspend.assert_called_with(stack_id=rsrc.resource_id)
def test_suspend_failed(self):
stacks = [get_stack(stack_status='SUSPEND_IN_PROGRESS'),
get_stack(stack_status='SUSPEND_FAILED',
stack_status_reason='Remote stack suspend failed')]
rsrc = self.create_remote_stack()
self.heat.stacks.get = mock.MagicMock(side_effect=stacks)
self.heat.actions.suspend = mock.MagicMock()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.suspend))
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status SUSPEND_FAILED due to '
'"Remote stack suspend failed"')
self.assertEqual(error_msg, six.text_type(error))
self.assertEqual((rsrc.SUSPEND, rsrc.FAILED), rsrc.state)
# assert suspend was not called
self.heat.actions.suspend.assert_has_calls([])
def test_suspend_failed_not_created(self):
self.initialize()
rsrc = self.parent['remote_stack']
# Note: the resource is not created so far
self.heat.actions.suspend = mock.MagicMock()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.suspend))
error_msg = ('Error: resources.remote_stack: '
'Cannot suspend remote_stack, resource not found')
self.assertEqual(error_msg, six.text_type(error))
self.assertEqual((rsrc.SUSPEND, rsrc.FAILED), rsrc.state)
# assert suspend was not called
self.heat.actions.suspend.assert_has_calls([])
def test_update(self):
stacks = [get_stack(stack_status='UPDATE_IN_PROGRESS'),
get_stack(stack_status='UPDATE_COMPLETE')]
rsrc = self.create_remote_stack()
props = copy.deepcopy(rsrc.parsed_template()['Properties'])
props['parameters']['name'] = 'bar'
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
rsrc.type(),
props)
self.heat.stacks.get = mock.MagicMock(side_effect=stacks)
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('bar', rsrc.properties.get('parameters')['name'])
env = environment.get_child_environment(rsrc.stack.env,
{'name': 'bar'})
fields = {
'stack_id': rsrc.resource_id,
'template': template_format.parse(remote_template),
'timeout_mins': 60,
'disable_rollback': True,
'parameters': {'name': 'bar'},
'files': self.files,
'environment': env.user_env_as_dict(),
}
self.heat.stacks.update.assert_called_with(**fields)
self.assertEqual(2, len(self.heat.stacks.get.call_args_list))
def test_update_with_replace(self):
rsrc = self.create_remote_stack()
props = copy.deepcopy(rsrc.parsed_template()['Properties'])
props['context']['region_name'] = 'RegionOne'
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
rsrc.type(),
props)
self.assertRaises(exception.UpdateReplace,
scheduler.TaskRunner(rsrc.update, update_snippet))
def test_update_failed(self):
stacks = [get_stack(stack_status='UPDATE_IN_PROGRESS'),
get_stack(stack_status='UPDATE_FAILED',
stack_status_reason='Remote stack update failed')]
rsrc = self.create_remote_stack()
props = copy.deepcopy(rsrc.parsed_template()['Properties'])
props['parameters']['name'] = 'bar'
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
rsrc.type(),
props)
self.heat.stacks.get = mock.MagicMock(side_effect=stacks)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.update,
update_snippet))
error_msg = _('ResourceInError: resources.remote_stack: '
'Went to status UPDATE_FAILED due to '
'"Remote stack update failed"')
self.assertEqual(error_msg, six.text_type(error))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.assertEqual(2, len(self.heat.stacks.get.call_args_list))
def test_update_no_change(self):
stacks = [get_stack(stack_status='UPDATE_IN_PROGRESS'),
get_stack(stack_status='UPDATE_COMPLETE')]
rsrc = self.create_remote_stack()
props = copy.deepcopy(rsrc.parsed_template()['Properties'])
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
rsrc.type(),
props)
self.heat.stacks.get = mock.MagicMock(side_effect=stacks)
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
| takeshineshiro/heat | heat/tests/test_remote_stack.py | Python | apache-2.0 | 26,392 |
# Copyright 2008 Michiel de Hoon.
# Revisions copyright 2009 Leighton Pritchard.
# Revisions copyright 2010 Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to parse output from the EMBOSS eprimer3 program.
As elsewhere in Biopython there are two input functions, read and parse,
for single record output and multi-record output. For primer3, a single
record object is created for each target sequence and may contain
multiple primers.
i.e. If you ran eprimer3 with a single target sequence, use the read
function. If you ran eprimer3 with multiple targets, use the parse
function to iterate over the results.
"""
__docformat__ = "restructuredtext en"
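# Example usage (an illustrative sketch; the file name "primers.txt" is an
# assumption, and the file must contain EMBOSS eprimer3 output):
#
#     from Bio.Emboss import Primer3
#     with open("primers.txt") as handle:
#         record = Primer3.read(handle)   # one target sequence
#     for pair in record.primers:
#         print(pair.forward_seq, pair.reverse_seq, len(pair))
#
# With multiple target sequences, iterate with parse instead:
#
#     with open("primers.txt") as handle:
#         for record in Primer3.parse(handle):
#             print(len(record.primers))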
# --- primer3
class Record(object):
"""Represent information from a primer3 run finding primers.
Members:
- primers - list of Primer objects describing primer pairs for
this target sequence.
- comments - the comment line(s) for the record
"""
def __init__(self):
self.comments = ""
self.primers = []
class Primers(object):
"""A primer set designed by Primer3.
Members:
- size - length of product, note you can use len(primer) as an
alternative to primer.size
- forward_seq
- forward_start
- forward_length
- forward_tm
- forward_gc
- reverse_seq
- reverse_start
- reverse_length
- reverse_tm
- reverse_gc
- internal_seq
- internal_start
- internal_length
- internal_tm
- internal_gc
"""
def __init__(self):
self.size = 0
self.forward_seq = ""
self.forward_start = 0
self.forward_length = 0
self.forward_tm = 0.0
self.forward_gc = 0.0
self.reverse_seq = ""
self.reverse_start = 0
self.reverse_length = 0
self.reverse_tm = 0.0
self.reverse_gc = 0.0
self.internal_seq = ""
self.internal_start = 0
self.internal_length = 0
self.internal_tm = 0.0
self.internal_gc = 0.0
def __len__(self):
"""Length of the primer product (i.e. product size)."""
return self.size
def parse(handle):
"""Iterate over primer3 output as Bio.Emboss.Primer3.Record objects.
"""
# Skip blank lines at head of file
while True:
line = handle.readline()
if line.strip():
break # Starting a record
# Read each record
record = None
primer = None
while True:
if line.startswith('# EPRIMER3') or line.startswith('# PRIMER3'):
# Record data
if record is not None:
yield record
record = Record()
record.comments += line
primer = None
        elif line.startswith('#'):
            # Skip the column header line; compare token-wise since the
            # header's aligned columns use variable-width spacing.
            if line.split() != '# Start Len Tm GC% Sequence'.split():
                record.comments += line
elif not line.strip():
pass
elif line[5:19] == "PRODUCT SIZE: ":
primer = Primers()
primer.size = int(line[19:])
record.primers.append(primer)
elif line[5:19]=="FORWARD PRIMER":
words = line.split()
if not primer or primer.size==0:
primer = Primers()
record.primers.append(primer)
primer.forward_start = int(words[2])
primer.forward_length = int(words[3])
primer.forward_tm = float(words[4])
primer.forward_gc = float(words[5])
primer.forward_seq = words[6]
elif line[5:19]=="REVERSE PRIMER":
words = line.split()
if not primer or primer.size==0:
primer = Primers()
record.primers.append(primer)
primer.reverse_start = int(words[2])
primer.reverse_length = int(words[3])
primer.reverse_tm = float(words[4])
primer.reverse_gc = float(words[5])
primer.reverse_seq = words[6]
elif line[5:19]=="INTERNAL OLIGO":
words = line.split()
if not primer or primer.size==0:
primer = Primers()
record.primers.append(primer)
primer.internal_start = int(words[2])
primer.internal_length = int(words[3])
primer.internal_tm = float(words[4])
primer.internal_gc = float(words[5])
try:
primer.internal_seq = words[6]
except IndexError: # eprimer3 reports oligo without sequence
primer.internal_seq = ''
try:
line = next(handle)
except StopIteration:
break
if record:
yield record
def read(handle):
"""Parse primer3 output into a Bio.Emboss.Primer3.Record object.
This is for when there is one and only one target sequence. If
designing primers for multiple sequences, use the parse function.
"""
iterator = parse(handle)
try:
first = next(iterator)
except StopIteration:
raise ValueError("No records found in handle")
try:
second = next(iterator)
except StopIteration:
second = None
if second is not None:
raise ValueError("More than one record found in handle")
return first
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Emboss/Primer3.py | Python | gpl-2.0 | 5,485 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='RequestRouterService',
fields=[
('service_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Service')),
('behindNat', models.BooleanField(default=False, help_text=b"Enables 'Behind NAT' mode.")),
('defaultTTL', models.PositiveIntegerField(default=30, help_text=b'DNS response time-to-live(TTL)')),
('defaultAction', models.CharField(default=b'best', help_text=b'Review if this should be enum', max_length=30)),
('lastResortAction', models.CharField(default=b'random', help_text=b'Review if this should be enum', max_length=30)),
('maxAnswers', models.PositiveIntegerField(default=3, help_text=b'Maximum number of answers in DNS response.')),
],
options={
'verbose_name': 'Request Router Service',
},
bases=('core.service', models.Model),
),
migrations.CreateModel(
name='ServiceMap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('enacted', models.DateTimeField(default=None, null=True, blank=True)),
('backend_status', models.CharField(default=b'Provisioning in progress', max_length=140)),
('deleted', models.BooleanField(default=False)),
('name', models.SlugField(help_text=b'name of this service map', unique=True)),
('prefix', models.CharField(help_text=b'FQDN of the region of URI space managed by RR on behalf of this service', max_length=256)),
('siteMap', models.FileField(help_text=b'maps client requests to service instances', upload_to=b'maps/', blank=True)),
('accessMap', models.FileField(help_text=b'specifies which client requests are allowed', upload_to=b'maps/', blank=True)),
('owner', models.ForeignKey(help_text=b'service which owns this map', to='core.Service')),
('slice', models.ForeignKey(help_text=b'slice that implements this service', to='core.Slice')),
],
options={
},
bases=(models.Model,),
),
]
| jermowery/xos | xos/services/requestrouter/migrations/0001_initial.py | Python | apache-2.0 | 2,793 |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XVP VNC Console Proxy Server."""
import sys
from nova import config
from nova.openstack.common import log as logging
from nova import service
from nova.vnc import xvp_proxy
def main():
config.parse_args(sys.argv)
logging.setup("nova")
wsgi_server = xvp_proxy.get_wsgi_server()
service.serve(wsgi_server)
service.wait()
| zestrada/nova-cs498cc | nova/cmd/xvpvncproxy.py | Python | apache-2.0 | 1,053 |
import pytest
from plenum.server.replica_validator_enums import STASH_CATCH_UP, STASH_WATERMARKS, STASH_VIEW_3PC
from plenum.test.helper import create_pre_prepare_no_bls, generate_state_root
from plenum.test.replica.helper import emulate_catchup
@pytest.fixture(scope='function')
def msg(replica):
pp = create_pre_prepare_no_bls(generate_state_root(),
view_no=replica.viewNo,
pp_seq_no=replica.last_ordered_3pc[1] + 1,
inst_id=replica.instId)
return pp, replica.primaryName
def test_unstash_catchup(replica, msg):
pre_prepare, _ = msg
replica.stasher._stash(STASH_CATCH_UP, "reason", *msg)
assert replica.stasher.stash_size(STASH_CATCH_UP) > 0
emulate_catchup(replica)
assert replica.stasher.stash_size(STASH_CATCH_UP) == 0
def test_unstash_future_view(replica, msg):
pre_prepare, _ = msg
replica.stasher._stash(STASH_VIEW_3PC, "reason", *msg)
assert replica.stasher.stash_size(STASH_VIEW_3PC) > 0
replica.on_view_propagated_after_catchup()
assert replica.stasher.stash_size(STASH_VIEW_3PC) == 0
def test_unstash_watermarks(replica, msg, looper):
pre_prepare, _ = msg
replica.last_ordered_3pc = (replica.viewNo, pre_prepare.ppSeqNo)
replica.stasher._stash(STASH_WATERMARKS, "reason", *msg)
assert replica.stasher.stash_size(STASH_WATERMARKS) > 0
replica._checkpointer.set_watermarks(low_watermark=pre_prepare.ppSeqNo)
assert replica.stasher.stash_size(STASH_WATERMARKS) == 0
| evernym/zeno | plenum/test/replica/stashing/test_replica_unstashing.py | Python | apache-2.0 | 1,561 |
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.views.generic import View
from dashboard.models import Dashboard
class DashboardDetail(View):
def get(self, request, dashboard_id):
dashboard = get_object_or_404(Dashboard, pk=dashboard_id)
ctx = {
'dashboard': dashboard,
'last_update': timezone.now()
}
if request.is_ajax():
template_name = 'dashboard/panels.html'
else:
template_name = 'dashboard/dashboard_detail.html'
return render(request, template_name, ctx)
class DashboardList(View):
def get(self, request):
dashboards = Dashboard.objects.all().order_by('name')
ctx = {
'dashboards': dashboards,
'last_update': timezone.now()
}
if request.is_ajax():
template_name = 'dashboard/dashboards.html'
else:
template_name = 'dashboard/dashboard_list.html'
        return render(request, template_name, ctx)
| akvo/butler | butler/dashboard/views.py | Python | agpl-3.0 | 1,062 |
from distutils.core import setup
setup(
name='python-logstash',
packages=['logstash'],
version='0.4.7',
description='Python logging handler for Logstash.',
long_description=open('README.rst').read(),
license='MIT',
author='Volodymyr Klochan',
author_email='vklochan@gmail.com',
url='https://github.com/vklochan/python-logstash',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Logging',
]
)
| vklochan/python-logstash | setup.py | Python | mit | 859 |
# TO DO: Add a Python version of this code
# # View the specified parameters of your deep learning model
# model@parameters
#
# # Examine the performance of the trained model
# model # display all performance metrics
#
# h2o.performance(model, valid = FALSE) # training set metrics
# h2o.performance(model, valid = TRUE) # validation set metrics
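# A possible Python version (a sketch; assumes `model` is a trained
# H2ODeepLearningEstimator from an earlier snippet):
# model.params # view the specified parameters
# model # display all performance metrics
# model.model_performance(train=True) # training set metrics
# model.model_performance(valid=True) # validation set metrics
| tarasane/h2o-3 | h2o-docs/src/booklets/v2_2015/source/deeplearning/deeplearning_inspect_model.py | Python | apache-2.0 | 352 |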
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.gop_order import GopOrder # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestGopOrder(unittest.TestCase):
"""GopOrder unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test GopOrder
        include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_qc.models.gop_order.GopOrder() # noqa: E501
        if include_optional:
            return GopOrder(
            )
        else:
            return GopOrder(
            )
def testGopOrder(self):
"""Test GopOrder"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/test/test_gop_order.py | Python | mit | 1,271 |
"""metaclub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(
r'^member/',
include('member.urls', namespace="member")
),
]
| ClubCedille/metaclub | django/metaclub/metaclub/urls.py | Python | mit | 865 |
#!/usr/bin/env python3
import argparse
import pickle
import os
def output_one_best(problem, target, solution):
"""Return output for a solution for the one-best."""
return "{0}.{1} {2} :: {3};".format(problem.source_lex,
target,
problem.instance_id,
solution)
def output_five_best(problem, target, solutions):
"""Return output for a solution for the one-best."""
answerstr = ";".join(solutions)
return "{0}.{1} {2} ::: {3};".format(problem.source_lex,
target,
problem.instance_id,
answerstr)
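# For illustration (hypothetical values): with problem.source_lex "bank"
# and problem.instance_id "17", output_one_best(problem, "es", "banco")
# yields "bank.es 17 :: banco;", while output_five_best(problem, "es",
# ["banco", "orilla"]) yields "bank.es 17 ::: banco;orilla;".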
def topfive(dist):
"""Given a distribution (the output of running prob_classify), return the
top five labels in that distribution."""
probs_and_labels = [(dist.prob(key), key) for key in dist.samples()]
descending = sorted(probs_and_labels, reverse=True)
labels = [label for (prob,label) in descending]
return labels[:5]
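# For example, for a distribution assigning {"banco": 0.5, "orilla": 0.3,
# "ribera": 0.2}, topfive(dist) would return ["banco", "orilla", "ribera"].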
def get_argparser():
parser = argparse.ArgumentParser(description='clwsd')
parser.add_argument('--model', type=str, required=True)
parser.add_argument('--targetlang', type=str, required=True)
parser.add_argument('--trialdir', type=str, required=True)
parser.add_argument('--treetaggerhome', type=str, required=False,
default="../TreeTagger/cmd")
return parser
| alexrudnick/chipa | squoiawsd/util_run_experiment.py | Python | gpl-3.0 | 1,512 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the non_parametric module.
"""
import numpy as np
from ..non_parametric import gini
def test_gini():
"""
Test Gini coefficient calculation.
"""
data_evenly_distributed = np.ones((100, 100))
data_point_like = np.zeros((100, 100))
data_point_like[50, 50] = 1
assert gini(data_evenly_distributed) == 0.
assert gini(data_point_like) == 1.
| astropy/photutils | photutils/morphology/tests/test_non_parametric.py | Python | bsd-3-clause | 451 |
fo = file("hello")
data = fo.read()
print data
| sburnett/seattle | repy/tests/s_testfileinit.py | Python | mit | 47 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from itertools import groupby
from datetime import datetime, timedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
from odoo.tools.misc import formatLang
import odoo.addons.decimal_precision as dp
class SaleOrder(models.Model):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_order = 'date_order desc, id desc'
@api.depends('order_line.price_total')
def _amount_all(self):
"""
Compute the total amounts of the SO.
"""
for order in self:
amount_untaxed = amount_tax = 0.0
for line in order.order_line:
amount_untaxed += line.price_subtotal
# FORWARDPORT UP TO 10.0
if order.company_id.tax_calculation_rounding_method == 'round_globally':
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_id)
amount_tax += sum(t.get('amount', 0.0) for t in taxes.get('taxes', []))
else:
amount_tax += line.price_tax
order.update({
'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),
'amount_tax': order.pricelist_id.currency_id.round(amount_tax),
'amount_total': amount_untaxed + amount_tax,
})
@api.depends('state', 'order_line.invoice_status')
def _get_invoiced(self):
"""
Compute the invoice status of a SO. Possible statuses:
- no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
          invoice. This is also the default value if the conditions for no other status are met.
- to invoice: if any SO line is 'to invoice', the whole SO is 'to invoice'
- invoiced: if all SO lines are invoiced, the SO is invoiced.
- upselling: if all SO lines are invoiced or upselling, the status is upselling.
The invoice_ids are obtained thanks to the invoice lines of the SO lines, and we also search
for possible refunds created directly from existing invoices. This is necessary since such a
refund is not directly linked to the SO.
"""
for order in self:
invoice_ids = order.order_line.mapped('invoice_lines').mapped('invoice_id').filtered(lambda r: r.type in ['out_invoice', 'out_refund'])
# Search for invoices which have been 'cancelled' (filter_refund = 'modify' in
# 'account.invoice.refund')
# use like as origin may contains multiple references (e.g. 'SO01, SO02')
refunds = invoice_ids.search([('origin', 'like', order.name)])
invoice_ids |= refunds.filtered(lambda r: order.name in [origin.strip() for origin in r.origin.split(',')])
# Search for refunds as well
refund_ids = self.env['account.invoice'].browse()
if invoice_ids:
for inv in invoice_ids:
refund_ids += refund_ids.search([('type', '=', 'out_refund'), ('origin', '=', inv.number), ('origin', '!=', False), ('journal_id', '=', inv.journal_id.id)])
line_invoice_status = [line.invoice_status for line in order.order_line]
if order.state not in ('sale', 'done'):
invoice_status = 'no'
elif any(invoice_status == 'to invoice' for invoice_status in line_invoice_status):
invoice_status = 'to invoice'
elif all(invoice_status == 'invoiced' for invoice_status in line_invoice_status):
invoice_status = 'invoiced'
elif all(invoice_status in ['invoiced', 'upselling'] for invoice_status in line_invoice_status):
invoice_status = 'upselling'
else:
invoice_status = 'no'
order.update({
'invoice_count': len(set(invoice_ids.ids + refund_ids.ids)),
'invoice_ids': invoice_ids.ids + refund_ids.ids,
'invoice_status': invoice_status
})
@api.model
def _default_note(self):
return self.env.user.company_id.sale_note
@api.model
def _get_default_team(self):
return self.env['crm.team']._get_default_team_id()
@api.onchange('fiscal_position_id')
def _compute_tax_id(self):
"""
Trigger the recompute of the taxes if the fiscal position is changed on the SO.
"""
for order in self:
order.order_line._compute_tax_id()
def _inverse_project_id(self):
self.project_id = self.related_project_id
name = fields.Char(string='Order Reference', required=True, copy=False, readonly=True, states={'draft': [('readonly', False)]}, index=True, default=lambda self: _('New'))
origin = fields.Char(string='Source Document', help="Reference of the document that generated this sales order request.")
client_order_ref = fields.Char(string='Customer Reference', copy=False)
state = fields.Selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('sale', 'Sales Order'),
('done', 'Locked'),
('cancel', 'Cancelled'),
], string='Status', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')
date_order = fields.Datetime(string='Order Date', required=True, readonly=True, index=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=False, default=fields.Datetime.now)
validity_date = fields.Date(string='Expiration Date', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="Manually set the expiration date of your quotation (offer), or it will set the date automatically based on the template if online quotation is installed.")
create_date = fields.Datetime(string='Creation Date', readonly=True, index=True, help="Date on which sales order is created.")
confirmation_date = fields.Datetime(string='Confirmation Date', readonly=True, index=True, help="Date on which the sale order is confirmed.", oldname="date_confirm")
user_id = fields.Many2one('res.users', string='Salesperson', index=True, track_visibility='onchange', default=lambda self: self.env.user)
partner_id = fields.Many2one('res.partner', string='Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, index=True, track_visibility='always')
partner_invoice_id = fields.Many2one('res.partner', string='Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order.")
partner_shipping_id = fields.Many2one('res.partner', string='Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order.")
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order.")
currency_id = fields.Many2one("res.currency", related='pricelist_id.currency_id', string="Currency", readonly=True, required=True)
project_id = fields.Many2one('account.analytic.account', 'Analytic Account', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order.", copy=False)
related_project_id = fields.Many2one('account.analytic.account', inverse='_inverse_project_id', related='project_id', string='Analytic Account', help="The analytic account related to a sales order.")
order_line = fields.One2many('sale.order.line', 'order_id', string='Order Lines', states={'cancel': [('readonly', True)], 'done': [('readonly', True)]}, copy=True)
invoice_count = fields.Integer(string='# of Invoices', compute='_get_invoiced', readonly=True)
invoice_ids = fields.Many2many("account.invoice", string='Invoices', compute="_get_invoiced", readonly=True, copy=False)
invoice_status = fields.Selection([
('upselling', 'Upselling Opportunity'),
('invoiced', 'Fully Invoiced'),
('to invoice', 'To Invoice'),
('no', 'Nothing to Invoice')
], string='Invoice Status', compute='_get_invoiced', store=True, readonly=True)
note = fields.Text('Terms and conditions', default=_default_note)
amount_untaxed = fields.Monetary(string='Untaxed Amount', store=True, readonly=True, compute='_amount_all', track_visibility='always')
amount_tax = fields.Monetary(string='Taxes', store=True, readonly=True, compute='_amount_all', track_visibility='always')
amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_amount_all', track_visibility='always')
payment_term_id = fields.Many2one('account.payment.term', string='Payment Terms', oldname='payment_term')
fiscal_position_id = fields.Many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position')
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env['res.company']._company_default_get('sale.order'))
team_id = fields.Many2one('crm.team', 'Sales Team', change_default=True, default=_get_default_team, oldname='section_id')
procurement_group_id = fields.Many2one('procurement.group', 'Procurement Group', copy=False)
product_id = fields.Many2one('product.product', related='order_line.product_id', string='Product')
@api.model
def _get_customer_lead(self, product_tmpl_id):
return False
@api.multi
def button_dummy(self):
return True
@api.multi
def unlink(self):
for order in self:
if order.state not in ('draft', 'cancel'):
raise UserError(_('You can not delete a sent quotation or a sales order! Try to cancel it before.'))
return super(SaleOrder, self).unlink()
@api.multi
def _track_subtype(self, init_values):
self.ensure_one()
if 'state' in init_values and self.state == 'sale':
return 'sale.mt_order_confirmed'
elif 'state' in init_values and self.state == 'sent':
return 'sale.mt_order_sent'
return super(SaleOrder, self)._track_subtype(init_values)
@api.multi
@api.onchange('partner_shipping_id', 'partner_id')
def onchange_partner_shipping_id(self):
"""
Trigger the change of fiscal position when the shipping address is modified.
"""
self.fiscal_position_id = self.env['account.fiscal.position'].get_fiscal_position(self.partner_id.id, self.partner_shipping_id.id)
return {}
@api.multi
@api.onchange('partner_id')
def onchange_partner_id(self):
"""
Update the following fields when the partner is changed:
- Pricelist
- Payment term
- Invoice address
- Delivery address
"""
if not self.partner_id:
self.update({
'partner_invoice_id': False,
'partner_shipping_id': False,
'payment_term_id': False,
'fiscal_position_id': False,
})
return
addr = self.partner_id.address_get(['delivery', 'invoice'])
values = {
'pricelist_id': self.partner_id.property_product_pricelist and self.partner_id.property_product_pricelist.id or False,
'payment_term_id': self.partner_id.property_payment_term_id and self.partner_id.property_payment_term_id.id or False,
'partner_invoice_id': addr['invoice'],
'partner_shipping_id': addr['delivery'],
}
if self.env.user.company_id.sale_note:
values['note'] = self.with_context(lang=self.partner_id.lang).env.user.company_id.sale_note
if self.partner_id.user_id:
values['user_id'] = self.partner_id.user_id.id
if self.partner_id.team_id:
values['team_id'] = self.partner_id.team_id.id
self.update(values)
@api.onchange('partner_id')
def onchange_partner_id_warning(self):
if not self.partner_id:
return
warning = {}
title = False
message = False
partner = self.partner_id
# If partner has no warning, check its company
if partner.sale_warn == 'no-message' and partner.parent_id:
partner = partner.parent_id
if partner.sale_warn != 'no-message':
# Block if partner only has warning but parent company is blocked
if partner.sale_warn != 'block' and partner.parent_id and partner.parent_id.sale_warn == 'block':
partner = partner.parent_id
title = ("Warning for %s") % partner.name
message = partner.sale_warn_msg
warning = {
'title': title,
'message': message,
}
if partner.sale_warn == 'block':
self.update({'partner_id': False, 'partner_invoice_id': False, 'partner_shipping_id': False, 'pricelist_id': False})
return {'warning': warning}
if warning:
return {'warning': warning}
@api.model
def create(self, vals):
if vals.get('name', 'New') == 'New':
vals['name'] = self.env['ir.sequence'].next_by_code('sale.order') or 'New'
        # Make sure 'partner_invoice_id', 'partner_shipping_id' and 'pricelist_id' are defined
if any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):
partner = self.env['res.partner'].browse(vals.get('partner_id'))
addr = partner.address_get(['delivery', 'invoice'])
vals['partner_invoice_id'] = vals.setdefault('partner_invoice_id', addr['invoice'])
vals['partner_shipping_id'] = vals.setdefault('partner_shipping_id', addr['delivery'])
vals['pricelist_id'] = vals.setdefault('pricelist_id', partner.property_product_pricelist and partner.property_product_pricelist.id)
result = super(SaleOrder, self).create(vals)
return result
@api.multi
def _prepare_invoice(self):
"""
Prepare the dict of values to create the new invoice for a sales order. This method may be
overridden to implement custom invoice generation (making sure to call super() to establish
a clean extension chain).
"""
self.ensure_one()
journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']
if not journal_id:
raise UserError(_('Please define an accounting sale journal for this company.'))
invoice_vals = {
'name': self.client_order_ref or '',
'origin': self.name,
'type': 'out_invoice',
'account_id': self.partner_invoice_id.property_account_receivable_id.id,
'partner_id': self.partner_invoice_id.id,
'partner_shipping_id': self.partner_shipping_id.id,
'journal_id': journal_id,
'currency_id': self.pricelist_id.currency_id.id,
'comment': self.note,
'payment_term_id': self.payment_term_id.id,
'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,
'company_id': self.company_id.id,
'user_id': self.user_id and self.user_id.id,
'team_id': self.team_id.id
}
return invoice_vals
@api.multi
def print_quotation(self):
self.filtered(lambda s: s.state == 'draft').write({'state': 'sent'})
return self.env['report'].get_action(self, 'sale.report_saleorder')
@api.multi
def action_view_invoice(self):
invoices = self.mapped('invoice_ids')
action = self.env.ref('account.action_invoice_tree1').read()[0]
if len(invoices) > 1:
action['domain'] = [('id', 'in', invoices.ids)]
elif len(invoices) == 1:
action['views'] = [(self.env.ref('account.invoice_form').id, 'form')]
action['res_id'] = invoices.ids[0]
else:
action = {'type': 'ir.actions.act_window_close'}
return action
@api.multi
def action_invoice_create(self, grouped=False, final=False):
"""
Create the invoice associated to the SO.
:param grouped: if True, invoices are grouped by SO id. If False, invoices are grouped by
(partner_invoice_id, currency)
:param final: if True, refunds will be generated if necessary
:returns: list of created invoices
"""
inv_obj = self.env['account.invoice']
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
invoices = {}
references = {}
for order in self:
group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)
for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):
if float_is_zero(line.qty_to_invoice, precision_digits=precision):
continue
if group_key not in invoices:
inv_data = order._prepare_invoice()
invoice = inv_obj.create(inv_data)
references[invoice] = order
invoices[group_key] = invoice
elif group_key in invoices:
vals = {}
if order.name not in invoices[group_key].origin.split(', '):
vals['origin'] = invoices[group_key].origin + ', ' + order.name
if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(', '):
vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref
invoices[group_key].write(vals)
if line.qty_to_invoice > 0:
line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)
elif line.qty_to_invoice < 0 and final:
line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)
if references.get(invoices.get(group_key)):
if order not in references[invoices[group_key]]:
references[invoice] = references[invoice] | order
if not invoices:
raise UserError(_('There is no invoicable line.'))
for invoice in invoices.values():
if not invoice.invoice_line_ids:
raise UserError(_('There is no invoicable line.'))
# If invoice is negative, do a refund invoice instead
if invoice.amount_untaxed < 0:
invoice.type = 'out_refund'
for line in invoice.invoice_line_ids:
line.quantity = -line.quantity
# Use additional field helper function (for account extensions)
for line in invoice.invoice_line_ids:
line._set_additional_fields(invoice)
# Necessary to force computation of taxes. In account_invoice, they are triggered
# by onchanges, which are not triggered when doing a create.
invoice.compute_taxes()
invoice.message_post_with_view('mail.message_origin_link',
values={'self': invoice, 'origin': references[invoice]},
subtype_id=self.env.ref('mail.mt_note').id)
return [inv.id for inv in invoices.values()]
@api.multi
def action_draft(self):
orders = self.filtered(lambda s: s.state in ['cancel', 'sent'])
orders.write({
'state': 'draft',
'procurement_group_id': False,
})
orders.mapped('order_line').mapped('procurement_ids').write({'sale_line_id': False})
@api.multi
def action_cancel(self):
self.write({'state': 'cancel'})
@api.multi
def action_quotation_send(self):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
self.ensure_one()
ir_model_data = self.env['ir.model.data']
try:
template_id = ir_model_data.get_object_reference('sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict()
ctx.update({
'default_model': 'sale.order',
'default_res_id': self.ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True,
'custom_layout': "sale.mail_template_data_notification_email_sale_order"
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
@api.multi
def force_quotation_send(self):
for order in self:
email_act = order.action_quotation_send()
if email_act and email_act.get('context'):
email_ctx = email_act['context']
email_ctx.update(default_email_from=order.company_id.email)
order.with_context(email_ctx).message_post_with_template(email_ctx.get('default_template_id'))
return True
@api.multi
def action_done(self):
self.write({'state': 'done'})
@api.multi
def _prepare_procurement_group(self):
return {'name': self.name}
@api.multi
def action_confirm(self):
for order in self:
order.state = 'sale'
order.confirmation_date = fields.Datetime.now()
if self.env.context.get('send_email'):
self.force_quotation_send()
order.order_line._action_procurement_create()
if self.env['ir.values'].get_default('sale.config.settings', 'auto_done_setting'):
self.action_done()
return True
@api.multi
def _create_analytic_account(self, prefix=None):
for order in self:
name = order.name
if prefix:
name = prefix + ": " + order.name
analytic = self.env['account.analytic.account'].create({
'name': name,
'code': order.client_order_ref,
'company_id': order.company_id.id,
'partner_id': order.partner_id.id
})
order.project_id = analytic
@api.multi
def order_lines_layouted(self):
"""
        Returns this order's lines classified by sale_layout_category and separated
        into pages according to the category pagebreaks. Used to render the report.
"""
self.ensure_one()
report_pages = [[]]
for category, lines in groupby(self.order_line, lambda l: l.layout_category_id):
# If last added category induced a pagebreak, this one will be on a new page
if report_pages[-1] and report_pages[-1][-1]['pagebreak']:
report_pages.append([])
# Append category to current report page
report_pages[-1].append({
'name': category and category.name or 'Uncategorized',
'subtotal': category and category.subtotal,
'pagebreak': category and category.pagebreak,
'lines': list(lines)
})
return report_pages
@api.multi
def _get_tax_amount_by_group(self):
self.ensure_one()
res = {}
currency = self.currency_id or self.company_id.currency_id
for line in self.order_line:
base_tax = 0
for tax in line.tax_id:
group = tax.tax_group_id
res.setdefault(group, 0.0)
amount = tax.compute_all(line.price_reduce + base_tax, quantity=line.product_uom_qty,
product=line.product_id, partner=line.order_partner_id)['taxes'][0]['amount']
res[group] += amount
if tax.include_base_amount:
base_tax += tax.compute_all(line.price_reduce + base_tax, quantity=1, product=line.product_id,
partner=line.order_partner_id)['taxes'][0]['amount']
res = sorted(res.items(), key=lambda l: l[0].sequence)
res = map(lambda l: (l[0].name, l[1]), res)
return res
class SaleOrderLine(models.Model):
_name = 'sale.order.line'
_description = 'Sales Order Line'
_order = 'order_id, layout_category_id, sequence, id'
@api.depends('state', 'product_uom_qty', 'qty_delivered', 'qty_to_invoice', 'qty_invoiced')
def _compute_invoice_status(self):
"""
Compute the invoice status of a SO line. Possible statuses:
- no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
          invoice. This is also the default value if the conditions for no other status are met.
- to invoice: we refer to the quantity to invoice of the line. Refer to method
`_get_to_invoice_qty()` for more information on how this quantity is calculated.
- upselling: this is possible only for a product invoiced on ordered quantities for which
          we delivered more than expected. This could arise if, for example, a project took more
          time than expected but we decided not to invoice the extra cost to the client. This
          occurs only in state 'sale', so that when a SO is set to done, the upselling opportunity
is removed from the list.
- invoiced: the quantity invoiced is larger or equal to the quantity ordered.
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if line.state not in ('sale', 'done'):
line.invoice_status = 'no'
elif not float_is_zero(line.qty_to_invoice, precision_digits=precision):
line.invoice_status = 'to invoice'
elif line.state == 'sale' and line.product_id.invoice_policy == 'order' and\
float_compare(line.qty_delivered, line.product_uom_qty, precision_digits=precision) == 1:
line.invoice_status = 'upselling'
elif float_compare(line.qty_invoiced, line.product_uom_qty, precision_digits=precision) >= 0:
line.invoice_status = 'invoiced'
else:
line.invoice_status = 'no'
@api.depends('product_uom_qty', 'discount', 'price_unit', 'tax_id')
def _compute_amount(self):
"""
Compute the amounts of the SO line.
"""
for line in self:
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_id)
line.update({
'price_tax': taxes['total_included'] - taxes['total_excluded'],
'price_total': taxes['total_included'],
'price_subtotal': taxes['total_excluded'],
})
@api.depends('product_id.invoice_policy', 'order_id.state')
def _compute_qty_delivered_updateable(self):
for line in self:
line.qty_delivered_updateable = (line.order_id.state == 'sale') and (line.product_id.track_service == 'manual') and (line.product_id.expense_policy == 'no')
@api.depends('qty_invoiced', 'qty_delivered', 'product_uom_qty', 'order_id.state')
def _get_to_invoice_qty(self):
"""
Compute the quantity to invoice. If the invoice policy is order, the quantity to invoice is
calculated from the ordered quantity. Otherwise, the quantity delivered is used.
"""
for line in self:
if line.order_id.state in ['sale', 'done']:
if line.product_id.invoice_policy == 'order':
line.qty_to_invoice = line.product_uom_qty - line.qty_invoiced
else:
line.qty_to_invoice = line.qty_delivered - line.qty_invoiced
else:
line.qty_to_invoice = 0
@api.depends('invoice_lines.invoice_id.state', 'invoice_lines.quantity')
def _get_invoice_qty(self):
"""
        Compute the quantity invoiced. In case of a refund, the quantity invoiced is decreased. Note
that this is the case only if the refund is generated from the SO and that is intentional: if
a refund made would automatically decrease the invoiced quantity, then there is a risk of reinvoicing
it automatically, which may not be wanted at all. That's why the refund has to be created from the SO
"""
for line in self:
qty_invoiced = 0.0
for invoice_line in line.invoice_lines:
if invoice_line.invoice_id.state != 'cancel':
if invoice_line.invoice_id.type == 'out_invoice':
qty_invoiced += invoice_line.uom_id._compute_quantity(invoice_line.quantity, line.product_uom)
elif invoice_line.invoice_id.type == 'out_refund':
qty_invoiced -= invoice_line.uom_id._compute_quantity(invoice_line.quantity, line.product_uom)
line.qty_invoiced = qty_invoiced
@api.depends('price_unit', 'discount')
def _get_price_reduce(self):
for line in self:
line.price_reduce = line.price_unit * (1.0 - line.discount / 100.0)
@api.depends('price_total', 'product_uom_qty')
def _get_price_reduce_tax(self):
for line in self:
line.price_reduce_taxinc = line.price_total / line.product_uom_qty if line.product_uom_qty else 0.0
@api.depends('price_subtotal', 'product_uom_qty')
def _get_price_reduce_notax(self):
for line in self:
line.price_reduce_taxexcl = line.price_subtotal / line.product_uom_qty if line.product_uom_qty else 0.0
@api.multi
def _compute_tax_id(self):
for line in self:
fpos = line.order_id.fiscal_position_id or line.order_id.partner_id.property_account_position_id
# If company_id is set, always filter taxes by the company
taxes = line.product_id.taxes_id.filtered(lambda r: not line.company_id or r.company_id == line.company_id)
line.tax_id = fpos.map_tax(taxes, line.product_id, line.order_id.partner_id) if fpos else taxes
@api.multi
def _prepare_order_line_procurement(self, group_id=False):
self.ensure_one()
return {
'name': self.name,
'origin': self.order_id.name,
'date_planned': datetime.strptime(self.order_id.date_order, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=self.customer_lead),
'product_id': self.product_id.id,
'product_qty': self.product_uom_qty,
'product_uom': self.product_uom.id,
'company_id': self.order_id.company_id.id,
'group_id': group_id,
'sale_line_id': self.id
}
@api.multi
def _action_procurement_create(self):
"""
Create procurements based on quantity ordered. If the quantity is increased, new
procurements are created. If the quantity is decreased, no automated action is taken.
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
new_procs = self.env['procurement.order'] # Empty recordset
for line in self:
if line.state != 'sale' or not line.product_id._need_procurement():
continue
qty = 0.0
for proc in line.procurement_ids:
qty += proc.product_qty
if float_compare(qty, line.product_uom_qty, precision_digits=precision) >= 0:
continue
if not line.order_id.procurement_group_id:
vals = line.order_id._prepare_procurement_group()
line.order_id.procurement_group_id = self.env["procurement.group"].create(vals)
vals = line._prepare_order_line_procurement(group_id=line.order_id.procurement_group_id.id)
vals['product_qty'] = line.product_uom_qty - qty
new_proc = self.env["procurement.order"].create(vals)
new_proc.message_post_with_view('mail.message_origin_link',
values={'self': new_proc, 'origin': line.order_id},
subtype_id=self.env.ref('mail.mt_note').id)
new_procs += new_proc
new_procs.run()
return new_procs
@api.model
def _get_purchase_price(self, pricelist, product, product_uom, date):
return {}
@api.model
def create(self, values):
onchange_fields = ['name', 'price_unit', 'product_uom', 'tax_id']
if values.get('order_id') and values.get('product_id') and any(f not in values for f in onchange_fields):
line = self.new(values)
line.product_id_change()
for field in onchange_fields:
if field not in values:
values[field] = line._fields[field].convert_to_write(line[field], line)
line = super(SaleOrderLine, self).create(values)
if line.state == 'sale':
line._action_procurement_create()
return line
@api.multi
def write(self, values):
lines = False
if 'product_uom_qty' in values:
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
lines = self.filtered(
lambda r: r.state == 'sale' and float_compare(r.product_uom_qty, values['product_uom_qty'], precision_digits=precision) == -1)
result = super(SaleOrderLine, self).write(values)
if lines:
lines._action_procurement_create()
return result
order_id = fields.Many2one('sale.order', string='Order Reference', required=True, ondelete='cascade', index=True, copy=False)
name = fields.Text(string='Description', required=True)
sequence = fields.Integer(string='Sequence', default=10)
invoice_lines = fields.Many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_line_id', string='Invoice Lines', copy=False)
invoice_status = fields.Selection([
('upselling', 'Upselling Opportunity'),
('invoiced', 'Fully Invoiced'),
('to invoice', 'To Invoice'),
('no', 'Nothing to Invoice')
], string='Invoice Status', compute='_compute_invoice_status', store=True, readonly=True, default='no')
price_unit = fields.Float('Unit Price', required=True, digits=dp.get_precision('Product Price'), default=0.0)
price_subtotal = fields.Monetary(compute='_compute_amount', string='Subtotal', readonly=True, store=True)
price_tax = fields.Monetary(compute='_compute_amount', string='Taxes', readonly=True, store=True)
price_total = fields.Monetary(compute='_compute_amount', string='Total', readonly=True, store=True)
price_reduce = fields.Monetary(compute='_get_price_reduce', string='Price Reduce', readonly=True, store=True)
tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', ('active', '=', False), ('active', '=', True)])
price_reduce_taxinc = fields.Monetary(compute='_get_price_reduce_tax', string='Price Reduce Tax inc', readonly=True, store=True)
price_reduce_taxexcl = fields.Monetary(compute='_get_price_reduce_notax', string='Price Reduce Tax excl', readonly=True, store=True)
discount = fields.Float(string='Discount (%)', digits=dp.get_precision('Discount'), default=0.0)
product_id = fields.Many2one('product.product', string='Product', domain=[('sale_ok', '=', True)], change_default=True, ondelete='restrict', required=True)
product_uom_qty = fields.Float(string='Quantity', digits=dp.get_precision('Product Unit of Measure'), required=True, default=1.0)
product_uom = fields.Many2one('product.uom', string='Unit of Measure', required=True)
qty_delivered_updateable = fields.Boolean(compute='_compute_qty_delivered_updateable', string='Can Edit Delivered', readonly=True, default=True)
qty_delivered = fields.Float(string='Delivered', copy=False, digits=dp.get_precision('Product Unit of Measure'), default=0.0)
qty_to_invoice = fields.Float(
compute='_get_to_invoice_qty', string='To Invoice', store=True, readonly=True,
digits=dp.get_precision('Product Unit of Measure'))
qty_invoiced = fields.Float(
compute='_get_invoice_qty', string='Invoiced', store=True, readonly=True,
digits=dp.get_precision('Product Unit of Measure'))
salesman_id = fields.Many2one(related='order_id.user_id', store=True, string='Salesperson', readonly=True)
currency_id = fields.Many2one(related='order_id.currency_id', store=True, string='Currency', readonly=True)
company_id = fields.Many2one(related='order_id.company_id', string='Company', store=True, readonly=True)
order_partner_id = fields.Many2one(related='order_id.partner_id', store=True, string='Customer')
analytic_tag_ids = fields.Many2many('account.analytic.tag', string='Analytic Tags')
state = fields.Selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('sale', 'Sale Order'),
('done', 'Done'),
('cancel', 'Cancelled'),
], related='order_id.state', string='Order Status', readonly=True, copy=False, store=True, default='draft')
customer_lead = fields.Float(
'Delivery Lead Time', required=True, default=0.0,
help="Number of days between the order confirmation and the shipping of the products to the customer", oldname="delay")
procurement_ids = fields.One2many('procurement.order', 'sale_line_id', string='Procurements')
layout_category_id = fields.Many2one('sale.layout_category', string='Section')
layout_category_sequence = fields.Integer(related='layout_category_id.sequence', string='Layout Sequence', store=True)
# Store is intentionally set in order to keep the "historic" order.
@api.multi
def _prepare_invoice_line(self, qty):
"""
Prepare the dict of values to create the new invoice line for a sales order line.
:param qty: float quantity to invoice
"""
self.ensure_one()
res = {}
account = self.product_id.property_account_income_id or self.product_id.categ_id.property_account_income_categ_id
if not account:
raise UserError(_('Please define income account for this product: "%s" (id:%d) - or for its category: "%s".') %
(self.product_id.name, self.product_id.id, self.product_id.categ_id.name))
fpos = self.order_id.fiscal_position_id or self.order_id.partner_id.property_account_position_id
if fpos:
account = fpos.map_account(account)
res = {
'name': self.name,
'sequence': self.sequence,
'origin': self.order_id.name,
'account_id': account.id,
'price_unit': self.price_unit,
'quantity': qty,
'discount': self.discount,
'uom_id': self.product_uom.id,
            'product_id': self.product_id.id or False,
            'layout_category_id': self.layout_category_id and self.layout_category_id.id or False,
'invoice_line_tax_ids': [(6, 0, self.tax_id.ids)],
'account_analytic_id': self.order_id.project_id.id,
'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],
}
return res
@api.multi
def invoice_line_create(self, invoice_id, qty):
"""
Create an invoice line. The quantity to invoice can be positive (invoice) or negative
(refund).
:param invoice_id: integer
:param qty: float quantity to invoice
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if not float_is_zero(qty, precision_digits=precision):
vals = line._prepare_invoice_line(qty=qty)
vals.update({'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})
self.env['account.invoice.line'].create(vals)
@api.multi
def _get_display_price(self, product):
if self.order_id.pricelist_id.discount_policy == 'without_discount':
from_currency = self.order_id.company_id.currency_id
return from_currency.compute(product.lst_price, self.order_id.pricelist_id.currency_id)
return product.with_context(pricelist=self.order_id.pricelist_id.id).price
@api.multi
@api.onchange('product_id')
def product_id_change(self):
if not self.product_id:
return {'domain': {'product_uom': []}}
vals = {}
domain = {'product_uom': [('category_id', '=', self.product_id.uom_id.category_id.id)]}
if not self.product_uom or (self.product_id.uom_id.id != self.product_uom.id):
vals['product_uom'] = self.product_id.uom_id
vals['product_uom_qty'] = 1.0
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=vals.get('product_uom_qty') or self.product_uom_qty,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id
)
name = product.name_get()[0][1]
if product.description_sale:
name += '\n' + product.description_sale
vals['name'] = name
self._compute_tax_id()
if self.order_id.pricelist_id and self.order_id.partner_id:
vals['price_unit'] = self.env['account.tax']._fix_tax_included_price(self._get_display_price(product), product.taxes_id, self.tax_id)
self.update(vals)
title = False
message = False
warning = {}
if product.sale_line_warn != 'no-message':
title = _("Warning for %s") % product.name
message = product.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
if product.sale_line_warn == 'block':
self.product_id = False
return {'warning': warning}
return {'domain': domain}
@api.onchange('product_uom', 'product_uom_qty')
def product_uom_change(self):
if not self.product_uom:
self.price_unit = 0.0
return
if self.order_id.pricelist_id and self.order_id.partner_id:
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=self.product_uom_qty,
date_order=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id,
fiscal_position=self.env.context.get('fiscal_position')
)
self.price_unit = self.env['account.tax']._fix_tax_included_price(self._get_display_price(product), product.taxes_id, self.tax_id)
@api.multi
def unlink(self):
if self.filtered(lambda x: x.state in ('sale', 'done')):
raise UserError(_('You can not remove a sale order line.\nDiscard changes and try setting the quantity to 0.'))
return super(SaleOrderLine, self).unlink()
@api.multi
def _get_delivered_qty(self):
'''
Intended to be overridden in sale_stock and sale_mrp
:return: the quantity delivered
:rtype: float
'''
return 0.0
def _get_real_price_currency(self, product, rule_id, qty, uom, pricelist_id):
"""Retrieve the price before applying the pricelist
:param obj product: object of current product record
        :param float qty: total quantity of product
        :param integer rule_id: id of the pricelist rule that computed the price
:param obj uom: unit of measure of current order line
:param integer pricelist_id: pricelist id of sale order"""
PricelistItem = self.env['product.pricelist.item']
field_name = 'lst_price'
currency_id = None
if rule_id:
pricelist_item = PricelistItem.browse(rule_id)
if pricelist_item.base == 'standard_price':
field_name = 'standard_price'
currency_id = pricelist_item.pricelist_id.currency_id
product_currency = (product.company_id and product.company_id.currency_id) or self.env.user.company_id.currency_id
if not currency_id:
currency_id = product_currency
cur_factor = 1.0
else:
if currency_id.id == product_currency.id:
cur_factor = 1.0
else:
cur_factor = currency_id._get_conversion_rate(product_currency, currency_id)
product_uom = self.env.context.get('uom') or product.uom_id.id
if uom and uom.id != product_uom:
# the unit price is in a different uom
uom_factor = uom._compute_price(1.0, product.uom_id)
else:
uom_factor = 1.0
return product[field_name] * uom_factor * cur_factor, currency_id.id
@api.onchange('product_id', 'price_unit', 'product_uom', 'product_uom_qty', 'tax_id')
def _onchange_discount(self):
self.discount = 0.0
if not (self.product_id and self.product_uom and
self.order_id.partner_id and self.order_id.pricelist_id and
self.order_id.pricelist_id.discount_policy == 'without_discount' and
self.env.user.has_group('sale.group_discount_per_so_line')):
return
context_partner = dict(self.env.context, partner_id=self.order_id.partner_id.id)
pricelist_context = dict(context_partner, uom=self.product_uom.id, date=self.order_id.date_order)
price, rule_id = self.order_id.pricelist_id.with_context(pricelist_context).get_product_price_rule(self.product_id, self.product_uom_qty or 1.0, self.order_id.partner_id)
new_list_price, currency_id = self.with_context(context_partner)._get_real_price_currency(self.product_id, rule_id, self.product_uom_qty, self.product_uom, self.order_id.pricelist_id.id)
new_list_price = self.env['account.tax']._fix_tax_included_price(new_list_price, self.product_id.taxes_id, self.tax_id)
if price != 0 and new_list_price != 0:
if self.product_id.company_id and self.order_id.pricelist_id.currency_id != self.product_id.company_id.currency_id:
# new_list_price is in company's currency while price in pricelist currency
ctx = dict(context_partner, date=self.order_id.date_order)
new_list_price = self.env['res.currency'].browse(currency_id).with_context(ctx).compute(new_list_price, self.order_id.pricelist_id.currency_id)
discount = (new_list_price - price) / new_list_price * 100
if discount > 0:
self.discount = discount
| dfang/odoo | addons/sale/models/sale.py | Python | agpl-3.0 | 48,109 |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.vanilla import plugin as p
from sahara.plugins.vanilla import utils as u
from sahara.tests.unit import base
from sahara.tests.unit import testutils as tu
class TestUtils(base.SaharaWithDbTestCase):
def setUp(self):
super(TestUtils, self).setUp()
self.plugin = p.VanillaProvider()
self.ng_manager = tu.make_ng_dict(
'mng', 'f1', ['manager'], 1,
[tu.make_inst_dict('mng1', 'manager')])
self.ng_namenode = tu.make_ng_dict(
'nn', 'f1', ['namenode'], 1,
[tu.make_inst_dict('nn1', 'namenode')])
self.ng_resourcemanager = tu.make_ng_dict(
'jt', 'f1', ['resourcemanager'], 1,
[tu.make_inst_dict('jt1', 'resourcemanager')])
self.ng_datanode = tu.make_ng_dict(
'dn', 'f1', ['datanode'], 2,
[tu.make_inst_dict('dn1', 'datanode-1'),
tu.make_inst_dict('dn2', 'datanode-2')])
self.ng_nodemanager = tu.make_ng_dict(
'tt', 'f1', ['nodemanager'], 2,
[tu.make_inst_dict('tt1', 'nodemanager-1'),
tu.make_inst_dict('tt2', 'nodemanager-2')])
self.ng_oozie = tu.make_ng_dict(
'ooz1', 'f1', ['oozie'], 1,
[tu.make_inst_dict('ooz1', 'oozie')])
self.ng_hiveserver = tu.make_ng_dict(
'hs', 'f1', ['hiveserver'], 1,
[tu.make_inst_dict('hs1', 'hiveserver')])
self.ng_secondarynamenode = tu.make_ng_dict(
'snn', 'f1', ['secondarynamenode'], 1,
[tu.make_inst_dict('snn1', 'secondarynamenode')])
def test_get_namenode(self):
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager, self.ng_namenode])
self.assertEqual('nn1', u.get_namenode(cl).instance_id)
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager])
self.assertIsNone(u.get_namenode(cl))
def test_get_oozie(self):
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager, self.ng_oozie])
self.assertEqual('ooz1', u.get_oozie(cl).instance_id)
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager])
self.assertIsNone(u.get_oozie(cl))
def test_get_hiveserver(self):
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager, self.ng_hiveserver])
self.assertEqual('hs1', u.get_hiveserver(cl).instance_id)
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager])
self.assertIsNone(u.get_hiveserver(cl))
def test_get_datanodes(self):
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager, self.ng_namenode,
self.ng_datanode])
datanodes = u.get_datanodes(cl)
self.assertEqual(2, len(datanodes))
self.assertEqual(set(['dn1', 'dn2']),
set([datanodes[0].instance_id,
datanodes[1].instance_id]))
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager])
self.assertEqual([], u.get_datanodes(cl))
def test_get_secondarynamenodes(self):
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager, self.ng_namenode,
self.ng_secondarynamenode])
self.assertEqual('snn1', u.get_secondarynamenode(cl).instance_id)
cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
[self.ng_manager])
        self.assertIsNone(u.get_secondarynamenode(cl))
| zhangjunli177/sahara | sahara/tests/unit/plugins/vanilla/test_utils.py | Python | apache-2.0 | 4,454 |
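The helpers exercised above (`get_namenode`, `get_oozie`, `get_datanodes`, ...) all share one lookup pattern over a cluster's node groups. A sketch of how such helpers are typically written — assuming, as the test fixtures suggest, that a cluster exposes `node_groups`, each carrying `node_processes` and `instances`; this is an illustration, not the actual sahara implementation:

```python
def get_instances(cluster, node_process):
    """All instances belonging to node groups that run `node_process`."""
    return [instance
            for ng in cluster.node_groups
            if node_process in ng.node_processes
            for instance in ng.instances]


def get_instance(cluster, node_process):
    """The single matching instance, or None (cf. get_namenode above)."""
    instances = get_instances(cluster, node_process)
    return instances[0] if instances else None
```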
from __future__ import absolute_import
from django.contrib import admin
from .models import Genre, Tag, Book, Asset, Author
class GenreAdmin(admin.ModelAdmin):
list_display = ('name', 'id')
class BookAdmin(admin.ModelAdmin):
list_display = ('name', )
search_fields = ['name']
class TagAdmin(admin.ModelAdmin):
list_display = ('text', )
search_fields = ['text']
class AssetAdmin(admin.ModelAdmin):
list_display = ('book', 'asset_type')
    search_fields = ['book__name']  # search by the related Book's name; a bare FK is not a valid search field
class AuthorAdmin(admin.ModelAdmin):
list_display = ('name', )
search_fields = ['name']
admin.site.register(Genre, GenreAdmin)
admin.site.register(Tag, TagAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Asset, AssetAdmin)
admin.site.register(Author, AuthorAdmin)
| r-singh/Test2 | webapp_project/website/admin.py | Python | mit | 857 |
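Django 1.7+ offers the `admin.register` decorator as an equivalent to the explicit `admin.site.register` calls above; the same registration for one of the ModelAdmins would look like:

```python
from django.contrib import admin

from .models import Genre


@admin.register(Genre)
class GenreAdmin(admin.ModelAdmin):
    list_display = ('name', 'id')
```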
# -*- encoding: utf-8 -*-
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
# Copyright (C) 2004 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#############################################################################
from . import hotel_reservation_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| MarcosCommunity/odoo | comunity_modules/hotel_reservation/report/__init__.py | Python | agpl-3.0 | 1,167 |
import math
import webbrowser
from PySide import QtGui, QtCore
from ui.settingswindow import SettingsWindow
class SystemTray(QtGui.QSystemTrayIcon):
def __init__(self, parent=None):
QtGui.QSystemTrayIcon.__init__(self, parent)
self.parent = parent
#menu before logging into Smartfile
self.menu = QtGui.QMenu(parent)
self.setIcon(QtGui.QIcon(":/menuicon.png"))
self.setToolTip('SmartFile Sync')
startAction = self.menu.addAction("Open SmartFile Folder")
self.connect(startAction, QtCore.SIGNAL("triggered()"),
self.parent.open_sync_folder)
startingup = self.menu.addAction("Connecting...")
startingup.setEnabled(False)
self.menu.addSeparator()
exitAction = self.menu.addAction("Exit")
self.connect(exitAction, QtCore.SIGNAL("triggered()"),
self.parent.exit)
self.setContextMenu(self.menu)
self.show()
def open_website(self):
url = "https://app.smartfile.com"
        webbrowser.open(url, new=2)
def open_settings(self):
"""Opens the settings window and brings it into focus"""
self.settingswindow.show_settings()
def on_login(self):
"""
After auth finishes, create the settings window
and update the system tray to display disk usage quota
"""
try:
whoami = self.parent.api.get("/whoami/")
realname = whoami['user']['name']
email = whoami['user']['email']
        except Exception:  # whoami lookup failed; fall back to a blank identity
realname = ""
email = ""
"""
try:
usedBytes = int(whoami['site']['quota']['disk_bytes_tally'])
bytesLimit = int(whoami['site']['quota']['disk_bytes_limit'])
percentUsed = usedBytes / bytesLimit
canCalculateSpace = True
except:
canCalculateSpace = False
"""
# initialize settings window
self.settingswindow = SettingsWindow(self.parent, name=realname,
email=email)
"""
if canCalculateSpace:
spaceLimit = bytesLimit
if spaceLimit < 1024:
measurement = "bytes"
elif spaceLimit < int(math.pow(1024, 2)):
spaceLimit /= 1024
measurement = "KB"
elif spaceLimit < int(math.pow(1024, 3)):
spaceLimit /= int(math.pow(1024, 2))
measurement = "MB"
else:
spaceLimit /= int(math.pow(1024, 3))
measurement = "GB"
"""
#menu after logging into Smartfile
self.menu = QtGui.QMenu(self.parent)
self.setIcon(QtGui.QIcon(":/menuicon.png"))
self.setToolTip('SmartFile Sync')
startAction = self.menu.addAction("Open SmartFile Folder")
self.connect(startAction, QtCore.SIGNAL("triggered()"),
self.parent.open_sync_folder)
openWebsite = self.menu.addAction("Launch SmartFile Website")
self.connect(openWebsite, QtCore.SIGNAL("triggered()"),
self.open_website)
"""
if canCalculateSpace:
self.menu.addSeparator()
"""
#quota = self.menu.addAction("%.1f%s of %s%s used" %
# (percentUsed, "%", spaceLimit,
# measurement))
#quota.setEnabled(False)
self.menu.addSeparator()
settingsAction = self.menu.addAction("Settings")
self.connect(settingsAction, QtCore.SIGNAL("triggered()"),
self.open_settings)
self.menu.addSeparator()
exitAction = self.menu.addAction("Exit")
self.connect(exitAction, QtCore.SIGNAL("triggered()"),
self.parent.exit)
self.setContextMenu(self.menu)
def notification(self, title, message):
"""Shows a system tray notification"""
if self.parent.config.get('LocalSettings', 'notifications'):
self.showMessage(title, message, QtGui.QSystemTrayIcon.NoIcon)
| travcunn/kissync-python | ui/systemtray.py | Python | mit | 4,157 |
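The tray class above uses old-style `self.connect(..., QtCore.SIGNAL("triggered()"), ...)` wiring. PySide also supports new-style signal connections, where the signal is an attribute instead of a string, so typos fail loudly at attribute lookup; a sketch of the same menu built that way:

```python
from PySide import QtGui


def build_tray_menu(parent):
    """Sketch: the post-login menu above, with new-style signal connections."""
    menu = QtGui.QMenu(parent)

    open_folder = menu.addAction("Open SmartFile Folder")
    open_folder.triggered.connect(parent.open_sync_folder)

    menu.addSeparator()

    exit_action = menu.addAction("Exit")
    exit_action.triggered.connect(parent.exit)
    return menu
```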
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.parallel import parallel_func
from mne.utils import ProgressBar, array_split_idx, use_log_level
def test_progressbar():
"""Test progressbar class."""
a = np.arange(10)
pbar = ProgressBar(a)
assert a is pbar.iterable
assert pbar.max_value == 10
pbar = ProgressBar(10)
assert pbar.max_value == 10
assert pbar.iterable is None
# Make sure that non-iterable input raises an error
def iter_func(a):
for ii in a:
pass
pytest.raises(Exception, iter_func, ProgressBar(20))
def _identity(x):
return x
def test_progressbar_parallel_basic(capsys):
"""Test ProgressBar with parallel computing, basic version."""
assert capsys.readouterr().out == ''
parallel, p_fun, _ = parallel_func(_identity, total=10, n_jobs=1,
verbose=True)
with use_log_level(True):
out = parallel(p_fun(x) for x in range(10))
assert out == list(range(10))
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block(x, pb):
for ii in range(len(x)):
pb.update(ii + 1)
return x
def test_progressbar_parallel_advanced(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr)) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(arr, 2))
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=10).sum()
assert sum_ == len(arr)
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
out = np.concatenate(out)
assert_array_equal(out, arr)
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block_wide(x, pb):
for ii in range(len(x)):
for jj in range(2):
pb.update(ii * 2 + jj + 1)
return x, pb.idx
def test_progressbar_parallel_more(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block_wide, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr) * 2) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(
arr, 2, n_per_split=2))
idxs = np.concatenate([o[1] for o in out])
assert_array_equal(idxs, np.arange(len(arr) * 2))
out = np.concatenate([o[0] for o in out])
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=len(arr) * 2).sum()
assert sum_ == len(arr) * 2
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
| Teekuningas/mne-python | mne/utils/tests/test_progressbar.py | Python | bsd-3-clause | 3,513 |
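The advanced tests above depend on `array_split_idx` handing each worker both its data chunk and the progress-bar tick indices it owns (`n_per_split` ticks per element). A rough model of that contract, inferred from the assertions — the real helper lives in `mne.utils`, so treat this purely as an illustration:

```python
import numpy as np


def split_with_idx(arr, n_splits, n_per_split=1):
    """Yield (tick_indices, chunk) pairs covering the whole progress range."""
    start = 0
    for chunk in np.array_split(arr, n_splits):
        stop = start + len(chunk) * n_per_split
        yield np.arange(start, stop), chunk
        start = stop


pairs = list(split_with_idx(np.arange(10), 2, n_per_split=2))
assert [len(idx) for idx, _ in pairs] == [10, 10]  # 2 ticks per element
assert np.concatenate([idx for idx, _ in pairs]).tolist() == list(range(20))
```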
import importlib
import inspect
from . import path, random
from .path import *
from .random import *
__all__ = [
'NOT_SET',
'BOOL_STR_MAP',
'STR_BOOL_MAP',
'as_bool',
'filter_items',
'get_items_with_key_prefix',
'load_object',
] + path.__all__ + random.__all__
NOT_SET = type('NOT_SET', (), {
'__bool__': (lambda self: False),
'__str__': (lambda self: 'NOT_SET'),
'__repr__': (lambda self: 'NOT_SET'),
'__copy__': (lambda self: self),
})()
"""A ``None``-ish constant for use where ``None`` may be a valid value."""
BOOL_STR_MAP = {
True: ('true', 'yes', 'y', 'on', '1'),
False: ('false', 'no', 'n', 'off', '0'),
}
STR_BOOL_MAP = {}
for b, strs in BOOL_STR_MAP.items():
for s in strs:
STR_BOOL_MAP[s] = b
def as_bool(value):
"""Convert value to bool."""
if isinstance(value, str):
try:
return STR_BOOL_MAP[value.strip().lower()]
except KeyError:
raise ValueError('Could not convert {} to bool'.format(value))
return bool(value)
def filter_items(items,
include=lambda k, v: True,
exclude=lambda k, v: False,
processors=()):
"""Filter and optionally process ``items``; yield pairs.
``items`` can be any object with a ``.items()`` method that returns
a sequence of pairs (e.g., a dict), or it can be a sequence of pairs
(e.g., a list of 2-item tuples).
Each item will be passed to ``include`` and then to ``exclude``;
they must return ``True`` and ``False`` respectively for the item to
be yielded.
If there are any ``processors``, each included item will be passed
to each processor in turn.
"""
try:
items = items.items()
except AttributeError:
pass
for k, v in items:
if include(k, v) and not exclude(k, v):
for processor in processors:
k, v = processor(k, v)
yield k, v
def get_items_with_key_prefix(items, prefix, strip_prefix=True, processors=()):
"""Filter ``items`` to those with a key that starts with ``prefix``.
``items`` is typically a dict but can also be a sequence. See
:func:`filter_items` for more on that.
"""
include = lambda k, v: k.startswith(prefix)
if strip_prefix:
prefix_len = len(prefix)
processors = (lambda k, v: (k[prefix_len:], v),) + processors
filtered = filter_items(items, include=include, processors=processors)
return items.__class__(filtered)
def load_object(obj, obj_name=None, package=None, level=2):
"""Load an object.
``obj`` may be an object or a string that points to an object. If
it's a string, the object will be loaded and returned from the
specified path. If it's any other type of object, it will be
returned as is.
The format of a path string is either 'package.module' to load a
module or 'package.module:object' to load an object from a module.
The object name can be passed via ``obj_name`` instead of in the
path (if the name is passed both ways, the name in the path will
win).
Examples::
>>> load_object('tangled.util:load_object')
<function load_object at ...>
>>> load_object('tangled.util', 'load_object')
<function load_object at ...>
>>> load_object('tangled.util:load_object', 'IGNORED')
<function load_object at ...>
>>> load_object('.util:load_object', package='tangled')
<function load_object at ...>
>>> load_object('.:load_object', package='tangled.util')
<function load_object at ...>
>>> load_object(':load_object', package='tangled.util')
<function load_object at ...>
>>> load_object(load_object)
<function load_object at ...>
>>> load_object(load_object, 'IGNORED', 'IGNORED', 'IGNORED')
<function load_object at ...>
"""
if isinstance(obj, str):
if is_object_path(obj):
module_name, obj_name = obj.split(':')
if not module_name:
module_name = '.'
elif is_module_path(obj):
module_name = obj
else:
raise ValueError('Path is not an object or module path: %s' % obj)
if module_name.startswith('.') and package is None:
package = caller_package(level)
obj = importlib.import_module(module_name, package)
if obj_name:
attrs = obj_name.split('.')
for attr in attrs:
obj = getattr(obj, attr)
return obj
def caller_package(level=2):
frame = inspect.stack()[level][0]
package = frame.f_globals['__package__']
return package
| TangledWeb/tangled | tangled/util/__init__.py | Python | mit | 4,706 |
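A few usage examples for the helpers above (values are illustrative; behaviour follows directly from the definitions):

```python
settings = {'db.host': 'localhost', 'db.port': '5432', 'debug': 'on'}

assert as_bool(settings['debug']) is True            # 'on' maps to True
assert as_bool('No') is False                        # case-insensitive

db = get_items_with_key_prefix(settings, 'db.')
assert db == {'host': 'localhost', 'port': '5432'}   # prefix stripped by default

kept = dict(filter_items(settings, exclude=lambda k, v: k == 'debug'))
assert 'debug' not in kept
```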
#!/usr/bin/env python
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
__author__ = 'Max Arnold <arnold.maxim@gmail.com>'
__version__ = '0.1.6'
setup(
name='python-mpns',
version=__version__,
# Package dependencies.
install_requires=['requests>=2.0'],
# Metadata for PyPI.
author='Max Arnold',
author_email='arnold.maxim@gmail.com',
license='BSD',
url='http://github.com/max-arnold/python-mpns',
keywords='mobile push notification microsoft mpns windows phone',
description='Python module for Microsoft Push Notification Service (MPNS) for Windows Phone',
long_description=open(os.path.abspath(os.path.join(os.path.dirname(__file__), 'README.md')), 'r').read(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Telephony',
'Topic :: Internet'
],
packages=['mpns'],
platforms='any',
)
| max-arnold/python-mpns | setup.py | Python | bsd-3-clause | 1,171 |
from __future__ import division #So that integer division is not truncated
from visual import * #Module with VPython's graphics functions
from random import random #Random number generator
print """
#############{{{{{{{{{{{{{{{{{{{{{{{{}}}}}}}}}}}}}}}}}}}}}}}}}##############
########**********************************************************########
###>>>>>>>>>> Simulacao de impacto de asteroides na Terra <<<<<<<<<<<###
###>>>>>>>>>> Fisica 1 - MIEC - 09/10 <<<<<<<<<<<###
###>>>>>>>>>> Carlos Miguel Correia da Costa <<<<<<<<<<<###
########**********************************************************########
#############{{{{{{{{{{{{{{{{{{{{{{{{}}}}}}}}}}}}}}}}}}}}}}}}}##############
#####&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&#######
## $$$$$$ Introducao $$$$$ ##
#####&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&#######
A presente simulacao tem por objectivo mostrar a influencia da gravidade
da Terra e da Lua nos objectos circundantes, e como tal salientar que
asteroides que a partida nao colidiriam com a Terra, poderao entrar em
rota de colisao com a Terra caso a Lua os desvie da sua orbita inicial.
Nesta simulacao para alem das forcas da gravidade foram incluidas as colisoes
inelasticas entre objectos, com coeficiente de restituicao de 0.75, e
incluida a acrecao de materia / objectos apos um certo numero de colisoes.
Nota:
A parte mais importante da simulacao e a orbita de colisao do asteroide principal,
mas para que houvesse mais projeccao de particulas para o espaco foram incluidos
10 asteroides extra, em rota de colisao com a Terra, que apenas entrarao em cena
apos o impacto do asteroide principal.
#############{{{{{{{{{{{{{{{{{{{{{{{{}}}}}}}}}}}}}}}}}}}}}}}}}##############
########**********************************************************########
#############{{{{{{{{{{{{{{{{{{{{{{{{}}}}}}}}}}}}}}}}}}}}}}}}}##############
"""
#####&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&#######
## $$$$$$ Configuration of the simulation's viewing window $$$$$$ ##
#####&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&#######
scene.title = "Asteroid impact"
scene.rotate = (pi/4)
scene.forward = (-1,-0.7,-1)
scene.width = 1920
scene.height = 1080
scene.fullscreen = True #The simulation starts in full screen
scene.autoscale = False #The view scale is not changed
Tamanho_eixos = 250000 #Value used to pick the asteroids' random positions
#and to set the view scale
scene.range = 0.5 * Tamanho_eixos #Definition of the view scale
scene_rate = 100 #Number of computations to perform per loop iteration
#Coordinate axes
L = 15000
xaxis = curve(pos=[(0,0,0), (L,0,0)], color=(0.5,0.5,0.5))
yaxis = curve(pos=[(0,0,0), (0,L,0)], color=(0.5,0.5,0.5))
zaxis = curve(pos=[(0,0,0), (0,0,L)], color=(0.5,0.5,0.5))
#####&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Tunable parameters for simulating several kinds of impacts ##
#####&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
#Definition of the dt used in the velocity increments
dt = 0.5
#Number of frames taken into account in the collision computations.
#Used to rule out significant errors in the intermediate calculations
offset_colisao_dt = 2
#Value used to regulate when coalescence enters the simulation:
#when the number of impacts exceeds the number of objects in the scene
#multiplied by this offset, the objects start to coalesce
offset_antes_coalescencia = 120
#Value used to regulate the Earth's disappearance after the impact
offset_antes_desaparecer_terra = 4.
#Algorithm used to determine the spheres' velocity components after the impact
#(0 = plain, 1 = using the centre of mass)
algoritmo_colisao = 1
Numero_asteroides_extra = 10 #Number of extra asteroids
G = 6.7e-11 #Universal gravitational constant
Crest = 0.75 #Restitution coefficient for the particle impacts
####################################
##>> Values for the Earth <<##
####################################
Posicao_terra = vector(0,0,0)
Raio_terra = 6371
Massa_terra = 5.9736E20 #Real value = 5.9736E24
Velocidade_terra = vector(0,0,0)
Quantidade_mov_terra = Massa_terra * Velocidade_terra
#Real horizontal deviation = 7.155
#Real inclination (tilt) = 23.44 degrees
#y = 8
#x = tan(7.155) * 8 ~ 1
#z = tan(23.44) * 8 ~ 3.5
Eixo_rotacao_terra = vector(1, 8, 3.5)
#Definition of the window centre
scene.center = (0, -2*Raio_terra, 0)
##################################
##>> Values for the Moon <<##
##################################
Posicao_lua = vector(78440.5,0,0) #Real value = 384405 km
Raio_lua = 1737.19
Massa_lua = 7.3477E19 #Real value = 7.3477E22 kg
Velocidade_lua = vector(0,0,-7E2) #Real value = 1.022 km/s
Quantidade_mov_lua = Massa_lua * Velocidade_lua
#Real horizontal deviation = 5.145 degrees
#Real inclination (tilt) = 6.68 degrees
#y = 11
#x = tan(5.145) * 11 ~ 1
#z = tan(6.68) * 11 ~ 1.3
Eixo_rotacao_lua = vector(-1, 11, 1.3)
###########################################
##>> Values for the asteroids <<##
###########################################
Posicao_asteroide = vector(12*Raio_terra,-Raio_terra,5*Raio_terra)
Raio_asteroide = 855.010 #6 km (estimated radius of the asteroid that wiped out the dinosaurs)
Massa_asteroide = 0.015 * Massa_terra #3E15 kg (estimated mass of the asteroid that wiped out the dinosaurs)
Massa_asteroide_extra = 0.1 * Massa_terra
Densidade_asteroide = Massa_asteroide_extra / (4/3*pi*Raio_asteroide**3)
#Number to be multiplied by -norm(pos_esfera_a - pos_esfera_b)
Offset_versor_Velocidade_asteroide = 1045.18
offset_versor_velocidade_asteroides_extra = 4000
#Number to be multiplied by the x component of the main asteroid's position
Offset_x_asteroide_principal = 4
#Number to be multiplied by the value that will be added to the extra asteroids' radius
offset_raio_extra_asteroides = 1
##########################################
##>> Values for the particles <<##
##########################################
Raio_particula = Raio_terra * 0.16
Massa_particula = Massa_terra * 0.05
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Lists that store the particle and asteroid data ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
Lista_particulas = []
Lista_posicoes = []
Lista_quantidade_mov = []
Lista_massa_particulas = []
Lista_raio_particulas = []
Numero_corpos = 0
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Creation of the Earth, which will hide the particles inside it ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
#Creation of the Earth mask
raio_ext = Raio_terra + Raio_particula
terra = sphere(pos=(0,0,0), radius=raio_ext, material=materials.earth)
terra.opacity = 1
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Creation of the Moon ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
#Adding the Moon's properties to the respective lists
Lista_particulas.append(sphere(pos=Posicao_lua, radius=Raio_lua,
                               material=materials.marble))
Lista_posicoes.append(Posicao_lua)
Lista_quantidade_mov.append(Quantidade_mov_lua)
Lista_massa_particulas.append(Massa_lua)
Lista_raio_particulas.append(Raio_lua)
orbita_lua = curve(pos = Posicao_lua, color = color.green)
Numero_corpos += 1
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Creation of the main asteroid ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
#Adding the main asteroid's properties to the respective lists
Lista_particulas.append(sphere(pos=Posicao_asteroide, radius=Raio_asteroide,
                               material=materials.marble, color = (200/256, 170/256, 140/256)))
Lista_posicoes.append(Posicao_asteroide)
#Vector giving the impact direction of the spheres
Dist_lua_asteroide = Posicao_lua - Posicao_asteroide
#Asteroid velocity
Velocidade_asteroide_principal = norm(vector(Dist_lua_asteroide.x * Offset_x_asteroide_principal,
                                             Dist_lua_asteroide.y, Dist_lua_asteroide.z))
#Asteroid momentum
p_asteroide = Massa_asteroide * Velocidade_asteroide_principal * Offset_versor_Velocidade_asteroide
Lista_quantidade_mov.append(p_asteroide)
#Mass and radius
Lista_massa_particulas.append(Massa_asteroide)
Lista_raio_particulas.append(Raio_asteroide)
#Orbit of the main asteroid
orbita_asteroide_principal = curve(pos = Posicao_asteroide, color = color.red)
Numero_corpos += 1
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Creation of the Earth's particles, grouped into the shape of a sphere ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
#Creation of the particle sphere, from bottom to top and from left to right,
#so that the particles form a sphere
#The smaller the particle radius, the better the simulation
#and the closer the particle cluster resembles a sphere
Numero_particulas = 0
#Minimum and maximum values for the y, x and z axes
ymin = -Raio_terra + Raio_particula
ymax = Raio_terra - Raio_particula
xzmin = -Raio_terra + Raio_particula
xzmax = Raio_terra - Raio_particula
#Initial values
y = ymin
x = xzmin
z = xzmin
#Creation of a sphere of particles
while (y <= ymax):
    #Creation of a plane of spheres
    while (x <= xzmax):
        #Creation of a row of spheres
        while (z <= xzmax):
            #The sphere is only inserted if it lies inside the Earth
            if (x**2+y**2+z**2 <= Raio_terra**2):
                #Creation of the particle
                Lista_particulas.append(sphere(pos=(x,y,z),
                    radius=Raio_particula, material=materials.marble,
                    color=(66/256,38/256,10/256)))
                Lista_posicoes.append(vector(x,y,z))
                Lista_quantidade_mov.append(vector(0,0,0))
                Lista_massa_particulas.append(Massa_particula)
                Lista_raio_particulas.append(Raio_particula)
                #Increment the counter of objects already inserted into the lists
                Numero_particulas += 1
            #Increment z to move on to the next sphere
            z += 2*Raio_particula
        #Increment x to move on to the next row of spheres
        x += 2*Raio_particula
        #Reset z to start at the beginning of the row
        z = xzmin
    #Reset x and z to start at the "beginning" of the new plane
    z = xzmin
    x = xzmin
    #Increment y to move on to the next plane of spheres
    y += 2*Raio_particula
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Creation of Numeric arrays from the lists built above ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
Numero_objectos = Numero_corpos + Numero_particulas
#Use of Numeric arrays (from the Numeric module) to improve performance
Array_particulas = array(Lista_particulas)
Array_posicoes = array(Lista_posicoes)
Array_quantidade_mov = array(Lista_quantidade_mov)
Array_massa_particulas = array(Lista_massa_particulas)
Array_massa_particulas.shape = (Numero_objectos, 1)
Array_raio_particulas = array(Lista_raio_particulas)
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Correction of the particle masses so that the total equals the Earth's mass ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
Massa_particulas_corrigida = Massa_terra / Numero_particulas
for n in range(0, Numero_particulas):
    Array_massa_particulas[Numero_corpos + n] = Massa_particulas_corrigida
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
##$$$$$ Loop that computes the forces and collisions between the bodies $$$$$##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
#Determination of the object with the largest radius, used for bouncing at the
#limits of the objects' movement area
maior_raio_objecto = max(Raio_lua, 2*Raio_asteroide, Raio_particula)
Numero_impactos_terra = 0
Numero_impactos = 0
while True:
    #Number of times per second the loop will run
    rate(scene_rate)
    #Earth's rotation
    terra.rotate(angle = (pi/90), axis = Eixo_rotacao_terra)
    #Earth's opacity
    if (Numero_impactos_terra > 0 and terra.opacity > 0):
        terra.opacity -= terra.opacity * 0.1
    #Moon's rotation
    Array_particulas[0].rotate(angle = (pi/180), axis = Eixo_rotacao_lua)
    #Computation of the distance components between every pair of particles
    Vector_distancia_par_particulas = Array_posicoes - Array_posicoes[:,newaxis]
    #Computation of the distances between every pair of particles
    Distancia_par_particulas = sqrt(add.reduce(Vector_distancia_par_particulas
        * Vector_distancia_par_particulas,-1))
    #Computation of the force components for all pairs of particles
    F = G * Array_massa_particulas * Array_massa_particulas[:,newaxis] * Vector_distancia_par_particulas / Distancia_par_particulas[:,:,newaxis]**3
    for n in range(Numero_objectos):
        F[n,n] = 0 #The force of an object on itself is 0...
    #Application of the forces to the objects (to their momentum components)
    #Before the impact, no forces are applied to the particles inside the Earth
    if (Numero_impactos_terra == 0):
        Array_quantidade_mov[0:Numero_corpos] += sum(F[0:Numero_corpos],1) * dt
    else:
        Array_quantidade_mov += sum(F,1) * dt
    #Computation of the collisions between particles
    #(binary matrix with the pairs of objects that collided)
    Colisoes = less_equal(Distancia_par_particulas, (Array_raio_particulas + Array_raio_particulas[:,newaxis]))-identity(Numero_objectos)
    #Before an impact with the Earth, collisions among the Earth's particles are ignored
    if Numero_impactos_terra == 0:
        #Before a collision with the Earth, only the collisions of the remaining bodies are considered
        Lista_colisoes = zeros((Numero_objectos, Numero_objectos))
        for i in range(0, Numero_corpos):
            Lista_colisoes[i] = Colisoes[i]
        #List with the indices of the particles that collided
        #Each element represents a pair of particles (i,j) that collided, encoded as:
        # i * Numero_particulas + j
        Lista_colisoes = sort(nonzero(Lista_colisoes.flat)[0]).tolist()
        #Removal of the inverse collision (a colliding with b is the same as b colliding with a; computing 1 is enough)
        #The try is used because on the first collision of a sphere with the Earth's particles there is no
        #symmetric collision pair in the matrix, since above that matrix was reduced to only the elements
        #that are not particles, so that collisions among the particles before the impact were ignored
        try: Lista_colisoes.remove(j * Numero_objectos + i)
        except: pass
        #List with the collisions of bodies against the Earth's particles
        Lista_colisoes_com_terra = zeros((Numero_objectos, Numero_objectos))
        for i in range(0, Numero_corpos):
            for j in range(Numero_corpos, Numero_objectos):
                Lista_colisoes_com_terra[i,j] = Colisoes[i,j]
        Lista_colisoes_com_terra = sort(nonzero(Lista_colisoes_com_terra.flat)[0]).tolist()
        try: Lista_colisoes_com_terra.remove(j * Numero_objectos + i)
        except: pass
    else:
        #List with the collisions of all particles
        Lista_colisoes = sort(nonzero(Colisoes.flat)[0]).tolist()
        try: Lista_colisoes.remove(j * Numero_objectos + i)
        except: pass
    #Computation of the "elastic/inelastic" collisions between particles, with conservation of momentum
    for ij in Lista_colisoes:
        i, j = divmod(ij, Numero_objectos) #Decoding of the pair of spheres that collided
        #Physical properties of the two spheres
        particula1_massa = Array_massa_particulas[i,0]
        particula1_velocidade = Array_quantidade_mov[i] / particula1_massa
        particula2_massa = Array_massa_particulas[j,0]
        particula2_velocidade = Array_quantidade_mov[j] / particula2_massa
        #Computation of the instant at which the spheres came into contact, so the collision is
        #processed exactly when the spheres are touching rather than when they already intersect
        #Without this part, in a cluster of spheres (the Earth's particles, for example)
        #many of them would end up stuck together and overlapping
        #So the goal is to compute t (the instant between the start and end of dt at which sphere1 touches sphere2)
        #Since x = x0 + v0 * t
        #Then P1(f) = P1(i) + V1(i) * t and P2(f) = P2(i) + V2(i) * t
        #With ~ used as a delta to make the equations easier to read
        #Subtracting one equation from the other and simplifying: ~P(f) = ~P(i) + t * ~V(i)
        #Squaring both sides and expanding: ~P(f)^2 = ~P(i)^2 + 2t * ~P(i) . ~V(i) + t^2 * ~V(i)^2
        #Since ~P(f) corresponds to the distance between the spheres, ~P(f) = D(t),
        #the goal is to find a t for which that distance equals the sum of the radii.
        #Hence D(t)^2 = (r1 + r2)^2, and substituting into the previous equation:
        #~P(i)^2 + 2t * ~P(i) . ~V(i) + t**2 * ~V(i)^2 - (r1 + r2)^2 = 0
        #Since this is a quadratic in t, t can be found with the quadratic formula
        #With d = b^2 - 4ac, the formula is x = (-b +- sqrt(d)) / 2a
        #Therefore:
        #d = (2 * ~P(i) . ~V(i) )^2 - 4 * ~V(i)^2 * (~P(i)^2 - (r1 + r2)^2)
        #t = ( -(2 * ~P(i) . ~V(i)) +- sqrt(d)) / (2 * ~V(i)^2)
        #Which, simplifying (cancelling the 2 and the 4), becomes:
        #t = ( -(~P(i).~V(i)) +- sqrt( (~P(i).~V(i))^2 - (~P(i)^2 - (r1 + r2)^2) * ~V(i)^2 ) ) / ~V(i)^2
        #That is:
        #t = (-b +- sqrt(b^2 - a*c) ) / a
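        #A quick numeric check of the formula above (illustrative 1-D values):
        #two spheres of radius 1 at x=0 and x=5 closing at relative speed 1 give
        #a = 1, b = dot((5,0,0), (-1,0,0)) = -5, c = 25 - (1+1)^2 = 21 and
        #d = b^2 - a*c = 4, so t = (5 +- 2) / 1: contact begins at t = 3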
        deltaP_i = Array_posicoes[j] - Array_posicoes[i]
        deltaV_i = particula2_velocidade - particula1_velocidade
        a = mag(deltaV_i)**2
        if a == 0: continue
        #If the velocity difference is 0 the spheres share the same velocity, so the collision computation can be skipped
        b = dot(deltaP_i, deltaV_i)
        c = mag(deltaP_i)**2 - (Array_particulas[i].radius + Array_particulas[j].radius)**2
        d = b**2 - a*c
        if d < 0: continue #d can never be less than 0 because the square root does not accept negative numbers.
        d_sqrt = sqrt(d)
        t1 = (-b+d_sqrt) / a
        t2 = (-b-d_sqrt) / a
        #If the product is negative it means one contact happened in the previous frame and the other will happen later
        #That is, we want the negative t smaller than dt
        #If there is no negative t smaller than dt the collision is ignored, because the spheres are no longer in contact
        #For example, at the start of a given frame there may be 4 spheres in contact, but... after computing the first collision,
        #one of them may no longer be in contact with the sphere it touched at the start of the frame, because the one it collided with knocked it out of contact with the other
        if (t1 * t2 > 0): continue
        if (t1 <= t2):
            tfinal = t1
        else:
            tfinal = t2
        if (-tfinal > offset_colisao_dt * dt): continue #If a significant rounding error occurs, the collision is ignored
        #So all that remains is to step back in time so the collision is computed at the exact moment the spheres touch,
        #and after the computation give back the time that was rewound
        Array_posicoes[i] += particula1_velocidade * tfinal
        Array_posicoes[j] += particula2_velocidade * tfinal
        #When particles coalesce they stay together, so the restitution coefficient is 0
        if (Numero_impactos > Numero_objectos * offset_antes_coalescencia):
            Crest = 0
        ##>>>>>>>>>> Collision computation <<<<<<<<<<##
        #Vector normal to the tangency plane between the two spheres
        distancia_particulas = Array_particulas[j].pos - Array_particulas[i].pos
        #Unit vector normal to the tangency plane
        vec_normal_ponto_impacto = norm(distancia_particulas)
        #Unit vector tangent to the tangency plane and perpendicular to the normal unit vector
        vec_tangente_ponto_impacto = vector(-vec_normal_ponto_impacto.y, vec_normal_ponto_impacto.x, 0)
        #The form I used initially, which caused "problems" and delayed my debugging of the program considerably
        #vec_tangente_ponto_impacto = vec_normal_ponto_impacto.rotate(angle = pi/2) #!!!$&!#$"@!!!
        #Unit vector perpendicular to the 2 previous unit vectors
        vec_binormal_ponto_impacto = cross(vec_normal_ponto_impacto, vec_tangente_ponto_impacto)
        #Selection of the algorithm used to compute the collisions
        #I kept both because the first makes it easier to understand how the second works
        if algoritmo_colisao == 0:
            #Projection of the components along the unit vectors determined above
            vx1 = dot(particula1_velocidade, vec_normal_ponto_impacto)
            vy1 = dot(particula1_velocidade, vec_binormal_ponto_impacto)
            vz1 = dot(particula1_velocidade, vec_tangente_ponto_impacto)
            vx2 = dot(particula2_velocidade, vec_normal_ponto_impacto)
            vy2 = dot(particula2_velocidade, vec_binormal_ponto_impacto)
            vz2 = dot(particula2_velocidade, vec_tangente_ponto_impacto)
            #Determination of the new components along the normal to the tangent plane using the restitution coefficient
            #(this only applies to the normal component, because the other 2 unit vectors lie in the plane
            #tangent to the impact, so no forces act along those directions)
            particula1_vxfinal = ((Crest * particula2_massa * (vx2 - vx1) + particula1_massa*vx1 +
                particula2_massa*vx2) / (particula1_massa + particula2_massa))
            particula2_vxfinal = ((Crest * particula1_massa * (vx1 - vx2) + particula1_massa*vx1 +
                particula2_massa*vx2) / (particula1_massa + particula2_massa))
            #Determination of the velocity vector after the impact, for each of the spheres
            particula1_vfinal = particula1_vxfinal * vec_normal_ponto_impacto + vy1 * vec_binormal_ponto_impacto + vz1 * vec_tangente_ponto_impacto
            particula2_vfinal = particula2_vxfinal * vec_normal_ponto_impacto + vy2 * vec_binormal_ponto_impacto + vz2 * vec_tangente_ponto_impacto
            #Update of the momenta
            Array_quantidade_mov[i] = particula1_vfinal * particula1_massa
            Array_quantidade_mov[j] = particula2_vfinal * particula2_massa
        else:
            #Computation of the "elastic" collisions between particles, with conservation of momentum
            #(alternative version, which uses the centre of mass to reduce the number of calculations and avoid so many rounding errors)
            ptotal = Array_quantidade_mov[i] + Array_quantidade_mov[j]
            mtotal = particula1_massa + particula2_massa
            #Change to the centre of mass (cm frame)
            velocidade_centro_massa = ptotal / mtotal
            #Particle velocities relative to the centre of mass
            vicm = particula1_velocidade - velocidade_centro_massa
            vjcm = particula2_velocidade - velocidade_centro_massa
            #Computation of the collision in the cm frame (vf = -Cr * vi, where the components of vi are
            #those of the projection of vi along the vector normal to the plane tangent to the impact point)
            vi_x_cm_f = -Crest * (dot(vicm, vec_normal_ponto_impacto) * vec_normal_ponto_impacto)
            vi_y_cm_f = dot(vicm, vec_binormal_ponto_impacto) * vec_binormal_ponto_impacto
            vi_z_cm_f = dot(vicm, vec_tangente_ponto_impacto) * vec_tangente_ponto_impacto
            vj_x_cm_f = -Crest * (dot(vjcm, vec_normal_ponto_impacto) * vec_normal_ponto_impacto)
            vj_y_cm_f = dot(vjcm, vec_binormal_ponto_impacto) * vec_binormal_ponto_impacto
            vj_z_cm_f = dot(vjcm, vec_tangente_ponto_impacto) * vec_tangente_ponto_impacto
            #Change back to the previous axis system (lab frame)
            Array_quantidade_mov[i] = ((vi_x_cm_f + vi_y_cm_f + vi_z_cm_f) + velocidade_centro_massa) * particula1_massa
            Array_quantidade_mov[j] = ((vj_x_cm_f + vj_y_cm_f + vj_z_cm_f) + velocidade_centro_massa) * particula2_massa
        #After computing the collision, give back the time that was rewound (-= because tfinal is negative)
        Array_posicoes[i] -= (Array_quantidade_mov[i] / particula1_massa) * tfinal
        Array_posicoes[j] -= (Array_quantidade_mov[j] / particula2_massa) * tfinal
        #Increment by 1 so I can detect when the lists must be updated (at the end of the program)
        #(to know that this is no longer the frame of the 1st collision)
        if Numero_impactos_terra == 1:
            Numero_impactos_terra +=1
        #Increment of the impact count (used to decide when particle coalescence should begin)
        if ((Numero_impactos_terra == 0) and (Lista_colisoes_com_terra)):
            Numero_impactos_terra += 1
        else:
            Numero_impactos += 1
        #Offset given so that at the start the particles bounce off each other and, after some
        #time, they begin to stick together after the impacts
        if (Numero_impactos > Numero_objectos * offset_antes_coalescencia):
            #If at least one of them is not visible, the collision is ignored
            if not Array_particulas[i].visible: continue
            if not Array_particulas[j].visible: continue
            #Determination of which particle has the larger radius
            maior_raio, menor_raio = i, j
            if Array_raio_particulas[j] > Array_raio_particulas[i]:
                maior_raio, menor_raio = j, i
            #By analogy with the volume of a sphere, the new radius is computed (V = 4/3 * pi * r^3)
            #It accounts for the volume, not the mass / density of the spheres
            novo_raio = pow(Array_particulas[i].radius**3 + Array_particulas[j].radius**3, 1./3.)
            nova_massa = Array_massa_particulas[i,0] + Array_massa_particulas[j,0]
            #No new momentum is needed because that was already handled by the perfectly
            #inelastic collision (restitution coefficient equal to 0)
            #Creation of the resulting sphere, at the position of the sphere that had the larger radius
            Array_particulas[maior_raio].radius = novo_raio
            Array_raio_particulas[maior_raio] = novo_raio
            Array_massa_particulas[maior_raio,0] = nova_massa
            #The smaller sphere is altered so it has almost no influence on the remaining spheres
            #It is not removed so the size of the arrays does not change, since deleting it would cause
            #problems when those arrays are processed again
            Array_particulas[menor_raio].visible = False #The particle becomes invisible
            #With negligible mass and radius
            Array_particulas[menor_raio].radius = Raio_particula*1E-20
            Array_raio_particulas[menor_raio] = Raio_particula*1E-20
            Array_massa_particulas[menor_raio,0] = Massa_particula*1E-10
            #And outside the movement area of the remaining particles
            Array_posicoes[menor_raio] = vector(2*Tamanho_eixos + Tamanho_eixos*random(), 0, 0)
            Array_particulas[menor_raio].pos = Array_posicoes[menor_raio]
            #With initial momentum equal to 0
            Array_quantidade_mov[menor_raio] = vector(0,0,0)
    #To avoid the rendering problems that happen in Visual Python when an object is very far from the rest...
    #the spheres are made to bounce back once they cross a given distance from the centre
    #Objects that crossed the planes x=-limit, y=-limit, z=-limit
    Objectos_fora_simulacao_negativos = less_equal(Array_posicoes, -Tamanho_eixos + maior_raio_objecto) # walls closest to origin
    pn = Array_quantidade_mov * Objectos_fora_simulacao_negativos
    Array_quantidade_mov += -pn + abs(pn) #Make the object head back inwards
    #Objects that crossed the planes x=limit, y=limit, z=limit
    Objectos_fora_simulacao_positivos = greater_equal(Array_posicoes, Tamanho_eixos - maior_raio_objecto) # walls farther from origin
    pp = Array_quantidade_mov * Objectos_fora_simulacao_positivos
    Array_quantidade_mov += -pp - abs(pp) #Make the object head back inwards
    #Update of the positions
    Array_posicoes += (Array_quantidade_mov / Array_massa_particulas) * dt
    #Update of the objects' locations
    for i in range(0, Numero_objectos):
        Array_particulas[i].pos = Array_posicoes[i]
    #Update of the Moon's and the main asteroid's orbit traces
    #If it has gone past the axes (for example after a particle is relocated when coalescing with one of larger radius),
    #the update is skipped
    if (Array_posicoes[0,0] < Tamanho_eixos):
        orbita_lua.append(Array_posicoes[0])
    if (Array_posicoes[1,0] < Tamanho_eixos):
        orbita_asteroide_principal.append(Array_posicoes[1])
    #Update of the lists that will later be reused when the extra objects are created
    if (Numero_impactos_terra == 1):
        for i in range(0, Numero_corpos):
            Lista_particulas[i].pos = Array_particulas[i].pos
            Lista_posicoes[i] = Array_posicoes[i]
            Lista_quantidade_mov[i] = Array_quantidade_mov[i]
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Creation of the extra asteroids, after the impact of the main asteroid ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
    Numero_corpos_antes_colisao = Numero_corpos
    if (Numero_impactos_terra == 1):
        #Creation of extra asteroids
        for i in range(Numero_asteroides_extra):
            #Random determination of the asteroids' positions
            x = -Tamanho_eixos+2*Tamanho_eixos*random()
            y = -Tamanho_eixos+2*Tamanho_eixos*random()
            z = -Tamanho_eixos+2*Tamanho_eixos*random()
            posicao = vector(x,y,z)
            Lista_posicoes.append(posicao)
            #Random determination of the asteroid's radius (based on the main asteroid)
            raio = Raio_asteroide + offset_raio_extra_asteroides * Raio_asteroide * random()
            Lista_particulas.append(sphere(pos=posicao, radius=raio, material=materials.marble, color = (175/256, 150/256, 100/256)))
            Lista_raio_particulas.append(raio)
            #Mass computed from the radius obtained above and the asteroids' density
            massa = Densidade_asteroide * (4/3*pi*raio**3)
            Lista_massa_particulas.append(massa)
            #Velocity chosen so that the asteroid passes through the origin and collides with the Earth
            velocidade = -offset_versor_velocidade_asteroides_extra * norm(posicao)
            #Momentum computation
            p_asteroide = velocidade * massa
            Lista_quantidade_mov.append(p_asteroide)
            Numero_corpos += 1
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Update of the Numeric arrays from the updated lists ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
        Numero_objectos = Numero_corpos + Numero_particulas
        #Use of Numeric arrays (from the Numeric module) to improve performance
        Array_particulas = array(Lista_particulas)
        Array_posicoes = array(Lista_posicoes)
        Array_quantidade_mov = array(Lista_quantidade_mov)
        Array_massa_particulas = array(Lista_massa_particulas)
        Array_massa_particulas.shape = (Numero_objectos, 1)
        Array_raio_particulas = array(Lista_raio_particulas)
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
## Correction of the particle masses so that the total equals the Earth's mass ##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
        Massa_particulas_corrigida = Massa_terra / Numero_particulas
        for n in range(0, Numero_particulas):
            Array_massa_particulas[Numero_corpos_antes_colisao + n] = Massa_particulas_corrigida
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
##$$$$$ Some of the relevant bibliography used $$$$$##
######&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&######
"""
http://vpython.org/contents/docs/visual/index.html
http://docs.python.org/tutorial/
http://homes.esat.kuleuven.be/~python/doc/numpy/array.html
http://www.gamedev.net/reference/articles/article1234.asp
http://qbx6.ltu.edu/s_schneider/physlets/main/momenta4.shtml
http://www.phy.ntnu.edu.tw/ntnujava/index.php?topic=4
http://academicearth.org/lectures/elastic-and-inelastic-collisions
http://cnx.org/content/m14852/latest/
http://vam.anest.ufl.edu/physics/collisionphysics.html
http://en.wikipedia.org/wiki/Inelastic_collision
http://www.vobarian.com/collisions/
http://www.vobarian.com/collisions/2dcollisions2.pdf
http://www.lightandmatter.com/html_books/2cl/ch04/ch04.html
"""
###########$$$$$$$$$>>>>>>>>>> Note <<<<<<<<<<$$$$$$$$$###########
# Part of the code concerning the numeric arrays is based on what is found #
# in the example files stars.py and gas.py that ship with VPython #
###########&&&&&&&&&>>>>>>>>>>>>>>>>> <<<<<<<<<<<<<<<<<<$$$$$$$$$###########
| carlosmccosta/Asteroid-Impact | Source code/Asteroid impact on Earth.py | Python | mit | 35,469 |
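The heart of the collision handling in the simulation above — solving a quadratic for the exact time of contact, then reflecting the normal velocity component in the centre-of-mass frame with a restitution coefficient — can be distilled into a standalone numpy sketch (plain Python instead of VPython; the function names are mine, not the simulation's):

```python
import numpy as np


def time_of_impact(p1, v1, r1, p2, v2, r2):
    """Earliest t with |(p2 - p1) + t*(v2 - v1)| == r1 + r2, or None.

    Same quadratic as the simulation: a*t^2 + 2*b*t + c = 0 with
    a = |dv|^2, b = dp.dv, c = |dp|^2 - (r1 + r2)^2.
    """
    dp, dv = p2 - p1, v2 - v1
    a = dv.dot(dv)
    if a == 0:
        return None                        # no relative motion
    b = dp.dot(dv)
    c = dp.dot(dp) - (r1 + r2) ** 2
    d = b * b - a * c
    if d < 0:
        return None                        # the paths never touch
    t1 = (-b + np.sqrt(d)) / a
    t2 = (-b - np.sqrt(d)) / a
    return min(t1, t2)


def collide(v1, m1, v2, m2, normal, crest=0.75):
    """Post-impact velocities; `normal` is the unit contact normal."""
    v_cm = (m1 * v1 + m2 * v2) / (m1 + m2)

    def bounce(v):
        rel = v - v_cm                     # velocity in the cm frame
        vn = rel.dot(normal) * normal      # component along the contact normal
        return v_cm + (rel - vn) - crest * vn

    return bounce(v1), bounce(v2)


p1, v1 = np.zeros(3), np.zeros(3)
p2, v2 = np.array([5.0, 0.0, 0.0]), np.array([-1.0, 0.0, 0.0])
assert time_of_impact(p1, v1, 1.0, p2, v2, 1.0) == 3.0
```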
import os
import pynq
import pytest
from pyfakefs.fake_filesystem import FakeDirectory
class DtboDirectory(FakeDirectory):
def __init__(self, *args, update=True, **kwargs):
self._update = update
super().__init__(*args, **kwargs)
def add_entry(self, path_object):
if not isinstance(path_object, FakeDirectory):
raise OSError(1, "Operation not permitted")
super().add_entry(path_object)
self.filesystem.create_file(os.path.join(path_object.path, 'status'),
contents='unapplied\n')
self.filesystem.create_file(os.path.join(path_object.path, 'dtbo'),
side_effect=self._add_dtbo)
def _add_dtbo(self, fd):
if self._update:
dtbo_dir = os.path.dirname(fd.path)
with open(os.path.join(dtbo_dir, 'status'), 'w') as f:
f.write('applied\n')
def _init_dtbo_fs(fs, update=True):
dtbo_dir = DtboDirectory('overlays', filesystem=fs, update=update)
fs.create_dir('/sys/kernel/config/device-tree')
fs.add_object('/sys/kernel/config/device-tree/', dtbo_dir)
def test_fake_dtbo_dir(fs):
_init_dtbo_fs(fs)
dtbo_dir = '/sys/kernel/config/device-tree/overlays/my_dtbo'
dtbo_dtbo = os.path.join(dtbo_dir, 'dtbo')
dtbo_status = os.path.join(dtbo_dir, 'status')
assert os.path.exists('/sys/kernel/config/device-tree/overlays')
os.mkdir(dtbo_dir)
assert os.path.exists(dtbo_dir)
assert os.path.exists(dtbo_dtbo)
assert os.path.exists(dtbo_status)
with open(dtbo_dtbo, 'w') as f:
f.write('A DTBO file')
with open(dtbo_status, 'r') as f:
assert f.read() == 'applied\n'
DTBO_DATA = 'A DTBO File'
def test_device_tree_applies(fs):
_init_dtbo_fs(fs, True)
fs.create_file('/home/xilinx/test.dtbo', contents=DTBO_DATA)
dtbo = pynq.devicetree.DeviceTreeSegment('/home/xilinx/test.dtbo')
assert dtbo.is_dtbo_applied() is False
dtbo.insert()
assert dtbo.is_dtbo_applied() is True
with open('/sys/kernel/config/device-tree/overlays/test/dtbo', 'r') as f:
assert f.read() == DTBO_DATA
# We need to ensure the directory is empty prior to removal
os.unlink('/sys/kernel/config/device-tree/overlays/test/dtbo')
os.unlink('/sys/kernel/config/device-tree/overlays/test/status')
dtbo.remove()
assert dtbo.is_dtbo_applied() is False
def test_device_tree_no_apply(fs):
_init_dtbo_fs(fs, False)
fs.create_file('/home/xilinx/test.dtbo', contents=DTBO_DATA)
dtbo = pynq.devicetree.DeviceTreeSegment('/home/xilinx/test.dtbo')
assert dtbo.is_dtbo_applied() is False
with pytest.raises(RuntimeError):
dtbo.insert()
def test_file_missing(fs):
_init_dtbo_fs(fs, False)
with pytest.raises(IOError):
pynq.devicetree.DeviceTreeSegment('/home/xilinx/test.dtbo')
def test_double_remove(fs):
_init_dtbo_fs(fs, True)
fs.create_file('/home/xilinx/test.dtbo', contents=DTBO_DATA)
dtbo = pynq.devicetree.DeviceTreeSegment('/home/xilinx/test.dtbo')
dtbo.insert()
# Verify empty directory
os.unlink('/sys/kernel/config/device-tree/overlays/test/dtbo')
os.unlink('/sys/kernel/config/device-tree/overlays/test/status')
dtbo.remove()
dtbo.remove()
| yunqu/PYNQ | tests/test_devicetree.py | Python | bsd-3-clause | 3,298 |
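The fixture above emulates the kernel's configfs interface for device-tree overlays: `mkdir` under `.../device-tree/overlays/` makes the kernel expose `dtbo` and `status` files, and writing the blob flips `status` to `applied`. A simplified sketch of a client driving that interface (the real `pynq.devicetree.DeviceTreeSegment` does more bookkeeping):

```python
import os

OVERLAYS = '/sys/kernel/config/device-tree/overlays'


def apply_overlay(name, dtbo_path):
    """Create the overlay directory and write the .dtbo blob into it."""
    target = os.path.join(OVERLAYS, name)
    os.mkdir(target)                         # kernel creates dtbo/status here
    with open(dtbo_path, 'rb') as src:
        with open(os.path.join(target, 'dtbo'), 'wb') as dst:
            dst.write(src.read())


def overlay_applied(name):
    with open(os.path.join(OVERLAYS, name, 'status')) as f:
        return f.read().strip() == 'applied'


def remove_overlay(name):
    os.rmdir(os.path.join(OVERLAYS, name))   # un-applies the overlay
```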
#!/usr/bin/python
# The MIT License (MIT)
#
# Copyright (c) 2017 Tag Games Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-----------------------------------------------------------------------------------
import sys
sys.path.append("../../ChilliSource/Tools/Scripts/")
import os
import platform
import rpi_build
# RPi
COMPILER = "g++"
ARCHIVER = "ar"
LINKER = "g++"
NUM_JOBS = 2
# Windows
if os.name == 'nt':
COMPILER = "C:\SysGCC\Raspberry\\bin\\arm-linux-gnueabihf-g++.exe"
ARCHIVER = "C:\SysGCC\Raspberry\\bin\\arm-linux-gnueabihf-ar.exe"
LINKER = "C:\SysGCC\Raspberry\\bin\\arm-linux-gnueabihf-g++.exe"
NUM_JOBS = None #Unrestricted
# Mac OS
elif platform.system() == "Darwin":
COMPILER = "/Volumes/xtools/arm-none-linux-gnueabi/bin/arm-none-linux-gnueabi-g++"
ARCHIVER = "/Volumes/xtools/arm-none-linux-gnueabi/bin/arm-none-linux-gnueabi-ar"
LINKER = "/Volumes/xtools/arm-none-linux-gnueabi/bin/arm-none-linux-gnueabi-g++"
NUM_JOBS = None #Unrestricted
PROJECT_ROOT = os.path.normpath("../..")
APP_SRC_ROOT = os.path.normpath("{}/AppSource".format(PROJECT_ROOT))
BUILD_DIR = os.path.normpath("{}/Projects/RPi/Build".format(PROJECT_ROOT))
OUTPUT_DIR = os.path.normpath("{}/Projects/RPi/Output".format(PROJECT_ROOT))
ADDITIONAL_COMPILER_FLAGS_TARGET_MAP = { "debug":"-DCS_LOGLEVEL_VERBOSE", "release":"-DCS_LOGLEVEL_WARNING"}
ADDITIONAL_INCLUDE_PATHS = "-I{0}/AppSource -I{0}/Libraries/Catch/include".format(PROJECT_ROOT)
ADDITIONAL_LIBRARY_PATHS = ""
ADDITIONAL_LIBRARIES = ""
ADDITIONAL_SRC_DIRS = [os.path.normpath('{}/'.format(APP_SRC_ROOT))]
APP_NAME = "CSTest"
# The entry point into the script.
#
# @param args
# The list of arguments - Should have an additional argument "debug" or "release" optionally followed by "clean"
#
def main(args):
rpi_build.run(args=args,
num_jobs=NUM_JOBS,
app_name=APP_NAME,
compiler_path=COMPILER, linker_path=LINKER, archiver_path=ARCHIVER,
additional_libs=ADDITIONAL_LIBRARIES, additional_lib_paths=ADDITIONAL_LIBRARY_PATHS, additional_include_paths=ADDITIONAL_INCLUDE_PATHS,
additional_compiler_flags_map=ADDITIONAL_COMPILER_FLAGS_TARGET_MAP,
app_source_dirs=ADDITIONAL_SRC_DIRS,
project_root=PROJECT_ROOT, build_root=BUILD_DIR, output_root=OUTPUT_DIR)
if __name__ == "__main__":
main(sys.argv[1:])
| ChilliWorks/CSTest | Projects/RPi/build.py | Python | mit | 3,307 |
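Assuming `rpi_build.run` consumes the target and optional clean flag the way `main`'s docstring describes, invocation from the Projects/RPi directory would look like:

```python
# python build.py debug          (build the debug target)
# python build.py release clean  (clean, then build release)
main(["release", "clean"])
```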
#Alexa
Security_Profile_Description = "TheScrivener"
Security_Profile_ID = "amzn1.application.21a811c4248b41e484645877b7dc591c"
Client_ID = "amzn1.application-oa2-client.2285b83f7d214ff7b3960126aa97eed0"
Client_Secret = "6da338b5d6ca88522439d5235c1c4735239f551ae2cfd5b4af3dfba7079e9b7f"
Product_ID = "ConCoEcho"
#Redis
redis_url = "https://bartleby.herokuapp.com/code"
| kplus87/bartleby | static/creds.py | Python | mit | 371 |
__author__ = 'sarangis'
from src.ir.function import *
from src.ir.module import *
from src.ir.instructions import *
BINARY_OPERATORS = {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'**': lambda x, y: x ** y,
'/': lambda x, y: x / y,
'//': lambda x, y: x // y,
'<<': lambda x, y: x << y,
'>>': lambda x, y: x >> y,
'%': lambda x, y: x % type(x)(y),
'&': lambda x, y: x & y,
'|': lambda x, y: x | y,
'^': lambda x, y: x ^ y,
}
class IRBuilder:
""" The main builder to be used for creating instructions. This has to be used to insert / create / modify instructions
This class will have to support all the other class creating it.
"""
def __init__(self, current_module = None, context=None):
self.__module = current_module
self.__insertion_point = None
self.__insertion_point_idx = 0
self.__orphaned_instructions = []
self.__context = context
self.__current_bb = None
@property
def module(self):
return self.__module
@module.setter
def module(self, mod):
self.__module = mod
@property
def context(self):
return self.__context
@context.setter
def context(self, ctx):
self.__context = ctx
def get_current_bb(self):
assert self.__current_bb is not None
return self.__current_bb
def insert_after(self, ip):
if isinstance(ip, BasicBlock):
self.__insertion_point = ip
self.__insertion_point_idx = 0
self.__current_bb = ip
elif isinstance(ip, Instruction):
self.__insertion_point = ip
self.__insertion_point_idx = ip.parent.find_instruction_idx(ip)
if self.__insertion_point_idx is None:
raise InvalidInstructionException("Count not find instruction in its parent basic block")
else:
self.__insertion_point_idx += 1
else:
raise InvalidTypeException("Expected either Basic Block or Instruction")
def insert_before(self, ip):
if isinstance(ip, BasicBlock):
self.__insertion_point = ip
self.__insertion_point_idx = -1
self.__current_bb = ip
elif isinstance(ip, Instruction):
self.__insertion_point = ip
self.__insertion_point_idx = ip.parent.find_instruction_idx(ip)
            if self.__insertion_point_idx is None:
                raise InvalidInstructionException("Could not find instruction in its parent basic block")
            elif self.__insertion_point_idx > 0:
                self.__insertion_point_idx -= 1
else:
raise InvalidTypeException("Expected either Basic Block or Instruction")
def __add_instruction(self, inst):
if self.__insertion_point_idx == -1:
# This is an orphaned instruction
self.__orphaned_instructions.append(inst)
elif isinstance(self.__insertion_point, BasicBlock):
self.__insertion_point.instructions.append(inst)
self.__insertion_point = inst
elif isinstance(self.__insertion_point, Instruction):
bb = self.__insertion_point.parent
bb.instructions.insert(self.__insertion_point_idx + 1, inst)
self.__insertion_point_idx += 1
self.__insertion_point = inst
else:
raise Exception("Could not add instruction")
    def const_fold_binary_op(self, lhs, rhs, op):
        # Constant folding is currently disabled; callers treat a None return
        # as "no folded result". The commented-out version below folded two
        # Number operands eagerly via BINARY_OPERATORS.
        return None
        # if isinstance(lhs, Number) and isinstance(rhs, Number):
        #     lhs = lhs.number
        #     rhs = rhs.number
        #     result = BINARY_OPERATORS[op](lhs, rhs)
        #     return Number(result)
        # else:
        #     return None
def create_function(self, name, args):
f = Function(name, args)
self.__module.functions[name] = f
return f
def set_entry_point(self, function):
self.__module.entry_point = function
def create_global(self, name, initializer):
g = Global(name, initializer)
self.__module.add_global(g)
def create_basic_block(self, name, parent):
bb = BasicBlock(name, parent)
return bb
def create_return(self, value = None, name=None):
ret_inst = ReturnInstruction(value)
self.__add_instruction(ret_inst)
def create_branch(self, bb, name=None):
if not isinstance(bb, BasicBlock):
raise InvalidTypeException("Expected a Basic Block")
branch_inst = BranchInstruction(bb, self.__current_bb, name)
self.__add_instruction(branch_inst)
return branch_inst
def create_cond_branch(self, cmp_inst, value, bb_true, bb_false, name=None):
cond_branch = ConditionalBranchInstruction(cmp_inst, value, bb_true, bb_false, self.__current_bb, name)
self.__add_instruction(cond_branch)
return cond_branch
def create_call(self, func, args, name=None):
call_inst = CallInstruction(func, args, self.__current_bb, name)
self.__add_instruction(call_inst)
return call_inst
def create_add(self, lhs, rhs, name=None):
folded_inst = self.const_fold_binary_op(lhs, rhs, '+')
if folded_inst is not None:
return folded_inst
add_inst = AddInstruction(lhs, rhs, self.__current_bb, name)
self.__add_instruction(add_inst)
return add_inst
def create_sub(self, lhs, rhs, name=None):
folded_inst = self.const_fold_binary_op(lhs, rhs, '-')
if folded_inst is not None:
return folded_inst
sub_inst = SubInstruction(lhs, rhs, self.__current_bb, name)
self.__add_instruction(sub_inst)
return sub_inst
def create_mul(self, lhs, rhs, name=None):
folded_inst = self.const_fold_binary_op(lhs, rhs, '*')
if folded_inst is not None:
return folded_inst
mul_inst = MulInstruction(lhs, rhs, self.__current_bb, name)
self.__add_instruction(mul_inst)
return mul_inst
def create_div(self, lhs, rhs, name=None):
folded_inst = self.const_fold_binary_op(lhs, rhs, '/')
if folded_inst is not None:
return folded_inst
div_inst = DivInstruction(lhs, rhs, self.__current_bb, name)
self.__add_instruction(div_inst)
return div_inst
def create_icmp(self, lhs, rhs, comparator, name=None):
        icmp_inst = ICmpInstruction(comparator, lhs, rhs, self.__current_bb, name)
self.__add_instruction(icmp_inst)
return icmp_inst
def create_select(self, cond, val_true, val_false, name=None):
select_inst = SelectInstruction(cond, val_true, val_false, self.__current_bb, name)
self.__add_instruction(select_inst)
return select_inst
def create_alloca(self, numEls=None, name=None):
alloca_inst = AllocaInstruction(numEls, self.__current_bb, name)
self.__add_instruction(alloca_inst)
return alloca_inst
def create_load(self, alloca):
load_inst = LoadInstruction(alloca, parent=self.__current_bb)
self.__add_instruction(load_inst)
return load_inst
def create_store(self, alloca, value):
store_inst = StoreInstruction(alloca, value, parent=self.__current_bb)
self.__add_instruction(store_inst)
return store_inst
def create_shl(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '<<')
if folded_inst is not None:
return folded_inst
shl_inst = ShiftLeftInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(shl_inst)
return shl_inst
def create_lshr(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '>>')
if folded_inst is not None:
return folded_inst
lshr_inst = LogicalShiftRightInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(lshr_inst)
return lshr_inst
def create_ashr(self, op1, op2, name=None):
ashr_inst = ArithmeticShiftRightInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(ashr_inst)
return ashr_inst
def create_and(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '&')
if folded_inst is not None:
return folded_inst
and_inst = AndInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(and_inst)
return and_inst
def create_or(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '|')
if folded_inst is not None:
return folded_inst
or_inst = OrInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(or_inst)
return or_inst
def create_xor(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '^')
if folded_inst is not None:
return folded_inst
xor_inst = XorInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(xor_inst)
return xor_inst
def create_number(self, number):
number = Number(number)
return number
def create_string(self, string):
string_obj = String(string)
return string_obj
#def create_vector(self, baseTy, numElts, name=None):
# vecTy = VectorType(baseTy, numElts)
# alloca = self.create_alloca(vecTy, 1, None, name)
# vec = self.create_load(alloca)
# return vec | ssarangi/spiderjit | src/ir/irbuilder.py | Python | mit | 9,699 |
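# A simplified, self-contained model of the insertion-point bookkeeping used by
# the builder above: an index into a block's instruction list that advances as
# instructions are appended. Block and insert_at are illustrative stand-ins,
# not the real IR classes.
class Block:
    def __init__(self):
        self.instructions = []

def insert_at(block, idx, inst):
    # mirrors __add_instruction: insert after idx, then advance the point
    block.instructions.insert(idx + 1, inst)
    return idx + 1

bb = Block()
ip = -1  # "insert before the block": start at the head, as insert_before does
for inst in ["a", "b", "c"]:
    ip = insert_at(bb, ip, inst)
print(bb.instructions)  # ['a', 'b', 'c']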
import numpy as np
import sys, os
import nrrd
from scipy import ndimage
if (len(sys.argv) < 2):
print('Error: missing arguments!')
print('e.g. python centreOfMass.py imageIn.nrrd')
else:
Iin = str(sys.argv[1])
data1, header1 = nrrd.read(Iin)
    print(list(np.array(ndimage.measurements.center_of_mass(data1), dtype=int)))
| Robbie1977/NRRDtools | centreOfMass.py | Python | mit | 348 |
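# A minimal sketch of what the script above computes, on a synthetic volume
# (assumes numpy and scipy are installed; not part of the original file):
import numpy as np
from scipy import ndimage

vol = np.zeros((5, 5, 5))
vol[1, 2, 3] = 1.0                   # a single bright voxel
print(ndimage.center_of_mass(vol))   # -> (1.0, 2.0, 3.0)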
#!/usr/bin/env python2
# Copyright (c) 2015 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
    sys.exit(1)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
# Zero-pad each byte so the salt is always exactly 32 hex characters
salt = "".join(["{0:02x}".format(x) for x in salt_sequence])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to aureus.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| hideoussquid/aureus-12-bitcore | share/rpcuser/rpcuser.py | Python | mit | 1,108 |
# -*- coding: utf-8 -*-
# Copyright © 2017 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import models
from . import wizard
| oihane/temp-addons | stock_package_creator/__init__.py | Python | agpl-3.0 | 182 |
# coding: utf-8
from django.conf.urls import patterns, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sitemaps.views import sitemap
from models import ShortStory, Novel, Chapter
# SiteMap data
sitemaps = {
'short_stories': GenericSitemap(
{'queryset': ShortStory.objects.all(), 'date_field': 'last_update'},
changefreq='daily',
priority=0.7
),
'novels': GenericSitemap(
{'queryset': Novel.objects.all(), 'date_field': 'last_update'},
changefreq='weekly',
priority=0.7
),
'chapters': GenericSitemap(
{'queryset': Chapter.objects.all(), 'date_field': 'last_update'},
changefreq='daily',
priority=0.5
),
}
urlpatterns = patterns(
'',
url(r'^$', u'literature.views.home'),
url(ur'^roman/(?P<slug_novel>.+)/(?P<slug_chapter>.+)/$', u'literature.views.chapter'),
url(ur'^roman/(?P<slug>.+)/$', u'literature.views.novel'),
url(ur'^nouvelle/(?P<slug>.+)/$', u'literature.views.short_story'),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| SpaceFox/textes | literature/urls.py | Python | mit | 1,319 |
class summing_list:
    def __init__(self, iter=None):
        # per-instance state: class-level mutable attributes would be shared
        # (and mutated) across every instance of the list
        self.layers = [[0]]
        self.size = 0
        if iter is not None:
            for i in iter:
                self.append(i)
def _sum(self, i):
t = 0
for r in self.layers:
if i % 2:
t += r[i - 1]
i >>= 1
return t
def sum_elements(self, i=None, j=None):
        if j is None:
            if i is None:
i = self.size
return self._sum(i)
else:
return self._sum(max(i, j)) - self._sum(min(i, j))
def __getitem__(self, i):
if i < self.size:
return self.layers[0][i]
else:
            raise IndexError("summing_list index out of range")
def __setitem__(self, i, v):
d = v - self.layers[0][i]
for r in self.layers:
r[i] += d
i >>= 1
def _double_size(self):
for r in self.layers:
r += [0] * len(r)
self.layers += [[self.layers[-1][0]]]
def __iadd__(self, iter):
for i in iter:
self.append(i)
return self
def __add__(self, x):
both = summing_list(self)
both += x
return both
def append(self, x):
self.size += 1
if self.size > len(self.layers[0]):
self._double_size()
self[self.size - 1] = x
def __repr__(self):
return self.layers[0][:self.size].__repr__()
def __iter__(self):
return iter(self.layers[0][:self.size])
| subhrm/google-code-jam-solutions | solutions/helpers/CodeJam-0.3.0/codejam/datastructures/summing_list.py | Python | mit | 1,482 |
#!/usr/bin/python
# -'''- coding: utf-8 -'''-
from glob import glob
import os
import subprocess
from PySide.QtCore import *
from PySide.QtGui import *
import BasketBuilder
import BasketGlobals as config
class WindowLayout(QTabWidget):
# Define Emitter Signals
launch = Signal(int, str)
createnew = Signal(int)
openasset = Signal(str)
newasset = Signal(str)
# renderscene = Signal(int, str, str)
def __init__(self, parent=None):
super(WindowLayout, self).__init__(parent)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# TABS
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
self.tabAssets = QWidget()
self.tabShots = QWidget()
self.tabMisc = QWidget()
self.addTab(self.tabShots, "tabShots")
self.addTab(self.tabAssets, "tabAssets")
self.addTab(self.tabMisc, "tabMisc")
self.setTabText(0, "Shots")
self.setTabText(1, "Assets")
self.setTabText(2, "Misc")
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# SHOTS
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# S3 INPUTS
self.label_scene = QLabel('Scene')
self.label_scene.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.label_shot = QLabel('Shot')
self.label_shot.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.dropdown_scene = QComboBox()
self.dropdown_scene.setMinimumWidth(100)
self.dropdown_shot = QComboBox()
self.dropdown_shot.setMinimumWidth(100)
# S3 LAYOUT
hbox_scene = QHBoxLayout()
hbox_scene.addWidget(self.label_scene)
hbox_scene.addWidget(self.dropdown_scene)
hbox_shot = QHBoxLayout()
hbox_shot.addWidget(self.label_shot)
hbox_shot.addWidget(self.dropdown_shot)
# MISC WIDGETS
self.label_options = QLabel('Options')
self.label_tag = QLabel('Tag')
self.dropdown_tag = QComboBox()
self.label_stage = QLabel('Stage')
self.dropdown_stage = QComboBox()
self.dropdown_stage.setMinimumWidth(100)
for i_stage, t_stage in enumerate(config.STAGE_DIRS):
self.dropdown_stage.addItem(t_stage)
# MISC LAYOUT
vbox_tag = QVBoxLayout()
vbox_tag.addWidget(self.label_tag)
vbox_tag.addWidget(self.dropdown_tag)
vbox_stage = QVBoxLayout()
vbox_stage.addWidget(self.label_stage)
vbox_stage.addWidget(self.dropdown_stage)
# LAUNCH BUTTONS
self.btn_launch = QPushButton('Launch Existing...')
self.btn_create = QPushButton('Create New...')
# self.label_render = QLabel('Make Sure Your Camera is Set in the Render Settings!')
# self.label_camera = QLabel('Alt Camera:')
# self.text_camera = QLineEdit()
# self.btn_render = QPushButton('Render Draft')
# Check if there is an existing file
self.updateDB()
self.dropdown_scene.currentIndexChanged.connect(self.updateShotList)
self.dropdown_stage.currentIndexChanged.connect(self.updateEnv)
self.dropdown_shot.currentIndexChanged.connect(self.updateEnv)
# LAUNCH SIGNALS
self.btn_launch.clicked.connect(self.emitlaunch)
# self.btn_launch.clicked.connect(QCoreApplication.instance().quit)
self.btn_create.clicked.connect(self.emitcreate)
# self.btn_create.clicked.connect(QCoreApplication.instance().quit)
# self.btn_render.clicked.connect(self.emitrender)
# APP LAYOUT
layout = QVBoxLayout()
appWrapper = QHBoxLayout()
leftColumn = QVBoxLayout()
leftUpper = QVBoxLayout()
leftUpper.addLayout(hbox_scene)
leftUpper.addLayout(hbox_shot)
leftUpper.addStretch(3)
leftUpper.setContentsMargins(20, 20, 20, 20)
leftLower = QVBoxLayout()
leftLower.addWidget(self.btn_launch)
leftLower.addWidget(self.btn_create)
leftLower.setContentsMargins(20, 0, 20, 0)
leftColumn.addLayout(leftUpper)
leftColumn.addLayout(leftLower)
rightColumn = QVBoxLayout()
rightColumn.addWidget(self.label_options)
rightColumn.addLayout(vbox_tag)
rightColumn.addLayout(vbox_stage)
rightColumn.addStretch(3)
bottomRow = QVBoxLayout()
line = QFrame()
line.setFrameStyle(QFrame.HLine | QFrame.Sunken)
line.setLineWidth(1)
line.setMidLineWidth(1)
# bottomRow.addWidget(line)
# bottomContent = QVBoxLayout()
# camLayout = QHBoxLayout()
# camLayout.addWidget(self.label_camera)
# camLayout.addWidget(self.text_camera)
# bottomContent.addLayout(camLayout)
# bottomContent.addWidget(self.label_render)
# bottomContent.addWidget(self.btn_render)
# bottomContent.setContentsMargins(0,20,0,20)
#
# bottomRow.addLayout(bottomContent)
appWrapper.addLayout(leftColumn)
appWrapper.addLayout(rightColumn)
layout.addLayout(appWrapper)
# layout.addLayout(bottomRow)
self.tabShots.setLayout(layout)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# ASSETS
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
self.btn_browse = QPushButton("Browse Assets")
self.btn_AssetLaunch = QPushButton("Launch")
self.label_Directory = QLabel("Directory:")
self.label_Directory.setAlignment(Qt.AlignLeft | Qt.AlignTop)
self.text_Directory = QLineEdit()
self.label_AssetName = QLabel('Name: ')
self.text_AssetName = QLineEdit()
self.btn_NewAsset = QPushButton('New Asset')
self.btn_AssetLaunch.clicked.connect(self.launchAsset)
self.btn_browse.clicked.connect(self.browseAssets)
self.btn_NewAsset.clicked.connect(self.launchNewAsset)
assetLayout = QVBoxLayout()
inputLayout = QVBoxLayout()
buttonLayout = QHBoxLayout()
inputLayout.addWidget(self.label_Directory)
inputLayout.addWidget(self.text_Directory)
buttonLayout.addWidget(self.btn_browse)
buttonLayout.addWidget(self.btn_AssetLaunch)
inputLayout.addLayout(buttonLayout)
inputLayout.addStretch(3)
inputLayout.setContentsMargins(0, 20, 0, 20)
newInput = QHBoxLayout()
newInput.addWidget(self.label_AssetName)
newInput.addWidget(self.text_AssetName)
newassetLayout = QVBoxLayout()
newassetLayout.addLayout(newInput)
newassetLayout.addWidget(self.btn_NewAsset)
newassetLayout.addStretch(3)
newassetLayout.setContentsMargins(0, 20, 0, 20)
assetLayout.addLayout(inputLayout)
assetLayout.addWidget(line)
assetLayout.addLayout(newassetLayout)
self.tabAssets.setLayout(assetLayout)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# MISC
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
self.label_Browser = QLabel("LAW Server Folder: ")
self.btn_Folder = QPushButton("Open")
self.btn_Folder.clicked.connect(self.openExplorer)
self.link_WebLogin = QLabel()
self.link_Trello = QLabel()
self.link_WebLogin.setText('''<a href="http://lobstersare.online/wp-login.php">Site Login</a>''')
self.link_WebLogin.setOpenExternalLinks(True)
self.link_Trello.setText('''<a href="https://trello.com/b/OEhZ5SGb">Trello Board</a>''')
self.link_Trello.setOpenExternalLinks(True)
miscLayout = QVBoxLayout()
folderLayout = QHBoxLayout()
linkLayout = QVBoxLayout()
folderLayout.addWidget(self.label_Browser)
folderLayout.addWidget(self.btn_Folder)
linkLayout.addWidget(self.link_WebLogin)
linkLayout.addWidget(self.link_Trello)
linkLayout.addStretch(3)
miscLayout.addLayout(folderLayout)
miscLayout.addLayout(linkLayout)
self.tabMisc.setLayout(miscLayout)
def openExplorer(self):
subprocess.Popen(r'explorer \\awexpress.westphal.drexel.edu\digm_anfx\SRPJ_LAW')
def browseAssets(self):
assetFile = QFileDialog.getOpenFileName(self,
"Open Asset",
os.path.join(config.serverDir(), 'working', 'assets'),
)
correctedPath = assetFile[0].replace('//', '\\\\').replace('/', '\\')
self.text_Directory.setText(correctedPath)
def launchAsset(self):
if self.text_Directory.text() != '':
self.openasset.emit(self.text_Directory.text())
def launchNewAsset(self):
if self.text_AssetName.text() != '':
self.newasset.emit(self.text_AssetName.text())
self.text_AssetName.clear()
def updateDB(self):
self.updateSceneList()
self.updateShotList()
def updateSceneList(self):
BAD_DIRS = ['assets', 'animatic']
self.dropdown_scene.clear()
dirs = next(os.walk(os.path.join(config.serverDir(), 'working', 'scenes')))[1]
dirs.sort()
for dirname in dirs:
if dirname not in BAD_DIRS:
self.dropdown_scene.addItem(dirname)
config.setSeq(self.dropdown_scene.currentText())
def updateShotList(self):
config.setSeq(self.dropdown_scene.currentText())
self.dropdown_shot.clear()
if os.getenv('SEQ') != '':
for i_shot, t_shot in enumerate(sorted(next(os.walk(os.path.join(config.serverDir(), 'working', 'scenes', os.getenv('SEQ'))))[1])):
self.dropdown_shot.addItem(t_shot)
config.setShot(self.dropdown_shot.currentText())
self.updateTags()
else:
self.canLaunch()
def emitlaunch(self):
        # Pass the currently selected stage index and tag along to the launcher
config.setStage(self.getStageIndex())
self.launch.emit(self.getStageIndex(), self.dropdown_tag.currentText())
def emitcreate(self):
config.setStage(self.getStageIndex())
self.createnew.emit(self.getStageIndex())
def emitrender(self):
config.setStage(self.getStageIndex())
self.renderscene.emit(self.getStageIndex(), self.dropdown_tag.currentText(), self.text_camera.text())
def getTags(self):
# Grab all the files in given stage directory, unbiased of file type
files = glob(os.path.join(config.stageDir(self.getStageIndex()), '*.*'))
sort = []
for i, n in enumerate(files):
# Add all found file variables to a list
filename, ext = os.path.splitext(n)
# print ext
splt = os.path.basename(n).split('_')
            if len(splt) >= 3:
                sort.append(splt[2])
# Sets are DISTINCT objects, no repeats, removes duplicate names
distinct = set(sort)
return distinct
def updateTags(self):
self.dropdown_tag.clear()
for i_tag, t_tag in enumerate(self.getTags()):
self.dropdown_tag.addItem(t_tag)
# Whenever tags update, we need to update whether or not there is existing file
self.canLaunch()
def getStageIndex(self):
return int(self.dropdown_stage.currentIndex())
def canLaunch(self):
if self.dropdown_tag.count() >= 1:
self.btn_launch.setEnabled(True)
# self.btn_render.setEnabled(True)
else:
self.btn_launch.setDisabled(True)
# self.btn_render.setDisabled(True)
def updateEnv(self):
if self.dropdown_shot.currentText() != '':
config.setShot(self.dropdown_shot.currentText())
self.updateTags()
class QDialog_FolderCreate(QDialog):
sendirs = Signal(str, str)
def __init__(self, parent=None):
super(QDialog_FolderCreate, self).__init__(parent)
self.sceneLabel = QLabel("Scene: ")
self.sceneLabel.setMinimumWidth(40)
self.sceneName = QLineEdit()
self.sceneName.setMaximumWidth(150)
self.sceneName.setPlaceholderText("Type Scene Here...")
self.shotLabel = QLabel("Shot: ")
self.shotLabel.setMinimumWidth(40)
self.shotName = QLineEdit()
self.shotName.setMaximumWidth(150)
self.shotName.setPlaceholderText("Type Shot Here...")
self.submitbtn = QPushButton("Create")
self.quitbtn = QPushButton("Quit")
self.quitbtn.clicked.connect(self.close)
hbox_Scene = QHBoxLayout()
hbox_Scene.addWidget(self.sceneLabel)
hbox_Scene.addWidget(self.sceneName)
hbox_Scene.addStretch(1)
hbox_Shot = QHBoxLayout()
hbox_Shot.addWidget(self.shotLabel)
hbox_Shot.addWidget(self.shotName)
hbox_Shot.addStretch(1)
hbox_Cmd = QHBoxLayout()
hbox_Cmd.addStretch(1)
hbox_Cmd.addWidget(self.submitbtn)
hbox_Cmd.addWidget(self.quitbtn)
# Create layout and add widgets
layout = QVBoxLayout()
layout.addLayout(hbox_Scene)
layout.addLayout(hbox_Shot)
layout.addLayout(hbox_Cmd)
# Set dialog layout
self.setLayout(layout)
# Add submitbtn signal
self.submitbtn.clicked.connect(self.pressbtn)
def pressbtn(self):
self.sendirs.emit(self.sceneName.text(), self.shotName.text())
self.sceneName.clear()
self.shotName.clear()
class Launcher(QMainWindow):
def __init__(self, parent=None):
super(Launcher, self).__init__(parent)
self.mainlayout = WindowLayout()
self.initUI()
def initUI(self):
# CREATE MENU BAR
exitAction = QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.close)
shotAction = QAction('&Create Shot', self)
shotAction.setStatusTip('Build out folder structure for a new shot')
shotAction.triggered.connect(self.create_dir)
syncAction = QAction('&Sync Project', self)
syncAction.setStatusTip('Sync Local Project with Server')
syncAction.triggered.connect(self.synclocal)
self.statusBar()
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
buildMenu = menubar.addMenu('&Build')
fileMenu.addAction(exitAction)
buildMenu.addAction(shotAction)
buildMenu.addAction(syncAction)
self.setCentralWidget(self.mainlayout)
self.show()
def create_dir(self):
self.modalFolder = QDialog_FolderCreate()
self.modalFolder.setWindowTitle('Create')
self.modalFolder.show()
self.modalFolder.sendirs.connect(self.send_to_make)
def synclocal(self):
BasketBuilder.rep_prod_dir()
@Slot(str, str)
def send_to_make(self, scene, shot):
BasketBuilder.make_prod_dir(scene, shot)
BasketBuilder.make_frame_dir(scene, shot)
self.mainlayout.updateDB() | Hartman-/Basket | basket/gui/GUI_Launch.py | Python | bsd-3-clause | 15,300 |
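# The launcher above communicates through custom Qt signals (launch, createnew,
# openasset, ...). A minimal, self-contained sketch of that signal/slot
# pattern, assuming PySide is installed (illustration only):
from PySide.QtCore import QObject, Signal, Slot

class Emitter(QObject):
    launched = Signal(int, str)

class Receiver(QObject):
    @Slot(int, str)
    def on_launch(self, stage_idx, tag):
        print("launching stage %d with tag %r" % (stage_idx, tag))

emitter, receiver = Emitter(), Receiver()
emitter.launched.connect(receiver.on_launch)
emitter.launched.emit(2, "anim")   # -> launching stage 2 with tag 'anim'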
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
from pymatgen.core.bonds import (
CovalentBond,
get_bond_length,
get_bond_order,
obtain_all_bond_lengths,
)
from pymatgen.core.periodic_table import Element
from pymatgen.core.sites import Site
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 26, 2012"
class CovalentBondTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_length(self):
site1 = Site("C", [0, 0, 0])
site2 = Site("H", [0, 0.7, 0.6])
self.assertAlmostEqual(CovalentBond(site1, site2).length, 0.92195444572928864)
def test_get_bond_order(self):
site1 = Site("C", [0, 0, 0])
site2 = Site("H", [0, 0, 1.08])
self.assertAlmostEqual(CovalentBond(site1, site2).get_bond_order(), 1)
bond = CovalentBond(Site("C", [0, 0, 0]), Site("Br", [0, 0, 2]))
self.assertAlmostEqual(bond.get_bond_order(0.5, 1.9), 0.894736842105263)
def test_is_bonded(self):
site1 = Site("C", [0, 0, 0])
site2 = Site("H", [0, 0, 1])
self.assertTrue(CovalentBond.is_bonded(site1, site2))
site2 = Site("H", [0, 0, 1.5])
self.assertFalse(CovalentBond.is_bonded(site1, site2))
site1 = Site("U", [0, 0, 0])
self.assertRaises(ValueError, CovalentBond.is_bonded, site1, site2)
self.assertTrue(CovalentBond.is_bonded(site1, site2, default_bl=2))
def test_str(self):
site1 = Site("C", [0, 0, 0])
site2 = Site("H", [0, 0.7, 0.6])
self.assertIsNotNone(CovalentBond(site1, site2))
class FuncTest(unittest.TestCase):
def test_get_bond_length(self):
self.assertAlmostEqual(get_bond_length("C", "C", 1), 1.54)
self.assertAlmostEqual(get_bond_length("C", "C", 2), 1.34)
self.assertAlmostEqual(get_bond_length("C", "H", 1), 1.08)
self.assertEqual(get_bond_length("C", "H", 2), 0.95)
self.assertAlmostEqual(get_bond_length("C", "Br", 1), 1.85)
def test_obtain_all_bond_lengths(self):
self.assertDictEqual(obtain_all_bond_lengths("C", "C"), {1.0: 1.54, 2.0: 1.34, 3.0: 1.2})
self.assertRaises(ValueError, obtain_all_bond_lengths, "Br", Element("C"))
self.assertDictEqual(obtain_all_bond_lengths("C", Element("Br"), 1.76), {1: 1.76})
        # mutating the returned dict must not leak back into the tabulated data
        bond_lengths_dict = obtain_all_bond_lengths("C", "N")
        bond_lengths_dict[4] = 999
        self.assertDictEqual(obtain_all_bond_lengths("C", "N"), {1.0: 1.47, 2.0: 1.3, 3.0: 1.16})
def test_get_bond_order(self):
self.assertAlmostEqual(get_bond_order("C", "C", 1), 3)
self.assertAlmostEqual(get_bond_order("C", "C", 1.2), 3)
self.assertAlmostEqual(get_bond_order("C", "C", 1.25), 2.642857142857143)
self.assertAlmostEqual(get_bond_order("C", "C", 1.34), 2)
self.assertAlmostEqual(get_bond_order("C", "C", 1.4), 1.7) # bond length in benzene
self.assertAlmostEqual(get_bond_order("C", "C", 1.54), 1)
self.assertAlmostEqual(get_bond_order("C", "C", 2.5), 0)
self.assertAlmostEqual(get_bond_order("C", "C", 9999), 0)
self.assertAlmostEqual(get_bond_order("C", "Br", 1.9, default_bl=1.9), 1)
self.assertAlmostEqual(get_bond_order("C", "Br", 2, default_bl=1.9), 0.7368421052631575)
self.assertAlmostEqual(get_bond_order("C", "Br", 1.9, tol=0.5, default_bl=1.9), 1)
self.assertAlmostEqual(get_bond_order("C", "Br", 2, tol=0.5, default_bl=1.9), 0.894736842105263)
self.assertRaises(ValueError, get_bond_order, "C", "Br", 1.9)
self.assertAlmostEqual(get_bond_order("N", "N", 1.25), 2)
if __name__ == "__main__":
unittest.main()
| gmatteo/pymatgen | pymatgen/core/tests/test_bonds.py | Python | mit | 3,958 |
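# The fractional orders asserted above (e.g. 1.7 at 1.40 angstrom) follow from
# linear interpolation between the tabulated single- and double-bond lengths.
# A worked sketch mirroring what the tests assert, not pymatgen's own code:
single, double = 1.54, 1.34   # tabulated C-C bond lengths, in angstroms
length = 1.40                 # roughly the C-C distance in benzene
order = 1 + (single - length) / (single - double)
print(round(order, 6))        # -> 1.7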
import os
import pytest
import re
hostenv = os.environ['SECUREDROP_TESTINFRA_TARGET_HOST']
@pytest.mark.parametrize('sysctl_opt', [
('net.ipv4.conf.all.accept_redirects', 0),
('net.ipv4.conf.all.accept_source_route', 0),
('net.ipv4.conf.all.rp_filter', 1),
('net.ipv4.conf.all.secure_redirects', 0),
('net.ipv4.conf.all.send_redirects', 0),
('net.ipv4.conf.default.accept_redirects', 0),
('net.ipv4.conf.default.accept_source_route', 0),
('net.ipv4.conf.default.rp_filter', 1),
('net.ipv4.conf.default.secure_redirects', 0),
('net.ipv4.conf.default.send_redirects', 0),
('net.ipv4.icmp_echo_ignore_broadcasts', 1),
('net.ipv4.ip_forward', 0),
('net.ipv4.tcp_max_syn_backlog', 4096),
('net.ipv4.tcp_syncookies', 1),
('net.ipv6.conf.all.disable_ipv6', 1),
('net.ipv6.conf.default.disable_ipv6', 1),
('net.ipv6.conf.lo.disable_ipv6', 1),
])
def test_sysctl_options(Sysctl, Sudo, sysctl_opt):
"""
Ensure sysctl flags are set correctly. Most of these checks
are disabling IPv6 and hardening IPv4, which is appropriate
due to the heavy use of Tor.
"""
with Sudo():
assert Sysctl(sysctl_opt[0]) == sysctl_opt[1]
def test_dns_setting(File):
"""
Ensure DNS service is hard-coded in resolv.conf config.
"""
f = File('/etc/resolvconf/resolv.conf.d/base')
assert f.is_file
assert f.user == "root"
assert f.group == "root"
assert oct(f.mode) == "0644"
    assert f.contains(r'^nameserver 8\.8\.8\.8$')
@pytest.mark.parametrize('kernel_module', [
'bluetooth',
'iwlwifi',
])
def test_blacklisted_kernel_modules(Command, File, Sudo, kernel_module):
"""
Test that unwanted kernel modules are blacklisted on the system.
Mostly these checks are defense-in-depth approaches to ensuring
that wireless interfaces will not work.
"""
with Sudo():
assert kernel_module not in Command("lsmod").stdout
f = File("/etc/modprobe.d/blacklist.conf")
assert f.contains("^blacklist {}$".format(kernel_module))
@pytest.mark.skipif(hostenv.startswith('mon'),
reason="Monitor Server does not have swap disabled yet.")
def test_swap_disabled(Command):
"""
Ensure swap space is disabled. Prohibit writing memory to swapfiles
to reduce the threat of forensic analysis leaking any sensitive info.
"""
c = Command.check_output('swapon --summary')
# A leading slash will indicate full path to a swapfile.
assert not re.search("^/", c, re.M)
# Expect that ONLY the headers will be present in the output.
rgx = re.compile("Filename\s*Type\s*Size\s*Used\s*Priority")
assert re.search(rgx, c)
| micahflee/securedrop | testinfra/common/test_system_hardening.py | Python | agpl-3.0 | 2,661 |
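# The Sysctl fixture used above ultimately reads kernel parameters; on Linux
# each sysctl name maps onto a file under /proc/sys. A minimal sketch of that
# lookup (an illustration, not testinfra's implementation):
def read_sysctl(name):
    # e.g. net.ipv4.ip_forward -> /proc/sys/net/ipv4/ip_forward
    path = "/proc/sys/" + name.replace(".", "/")
    with open(path) as f:
        return f.read().strip()

# read_sysctl("net.ipv4.ip_forward") should return "0" on a hardened host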
#! /usr/bin/env python
# encoding: utf-8
import TaskGen
from TaskGen import taskgen,feature
from Constants import*
TaskGen.declare_chain(name='luac',rule='${LUAC} -s -o ${TGT} ${SRC}',ext_in='.lua',ext_out='.luac',reentrant=0,install='LUADIR',)
def init_lua(self):
self.default_chmod=O755
def detect(conf):
conf.find_program('luac',var='LUAC',mandatory=True)
feature('lua')(init_lua)
| micove/libdesktop-agnostic | wafadmin/Tools/lua.py | Python | lgpl-2.1 | 388 |
# -*- coding: utf-8 -*-
# FUNCTION FOR PAIRING WITH LATCH
def pair():
import os, json, latch
    appid = input('Enter the Application ID: ')
    while len(appid) == 0:
        print('Try again...')
        appid = input('Enter the Application ID: ')
    seckey = input('Enter the Secret Key: ')
    while len(seckey) == 0:
        print('Try again...')
        seckey = input('Enter the Secret Key: ')
    api = latch.Latch(appid, seckey)
    pair_code = input('Enter the pairing code shown on your phone: ')
    while len(pair_code) == 0:
        print('Try again...')
        pair_code = input('Enter the pairing code shown on your phone: ')
response = api.pair(pair_code)
responseData = str(response.get_data())
responseData1 = responseData[15:-2]
responseError = response.get_error()
    # WRITE THE PAIRING DATA TO A FILE
try:
archivo = open('parear.data','w')
archivo.write(appid + '\n')
archivo.write(seckey + '\n')
archivo.write(responseData1)
        input('Operation completed, press a key to continue.. ')
        archivo.close()
    except IOError:
        print('The parear.data file does not exist or lacks the required permissions')
    if responseError != "":
        print('Error:', responseError)
        input('Press a key to exit.. ')
try:
salida=json.dumps(responseData)
except (TypeError, ValueError) as err:
print ('ERROR:', err)
| maxssestepa/latchwake | parear.py | Python | lgpl-2.1 | 1,368 |
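# After pairing, the AccountId stored in parear.data is what later status
# checks operate on. A hedged sketch of such a check, assuming the SDK exposes
# status(account_id) as in ElevenPaths' Python Latch SDK:
import latch

def check_status(appid, seckey, account_id):
    # account_id is the pairing AccountId persisted by pair() above
    api = latch.Latch(appid, seckey)
    response = api.status(account_id)
    return response.get_data(), response.get_error()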
from django.utils.http import http_date
class ConditionalGetMiddleware(object):
"""
Handles conditional GET operations. If the response has a ETag or
Last-Modified header, and the request has If-None-Match or
If-Modified-Since, the response is replaced by an HttpNotModified.
Also sets the Date and Content-Length response-headers.
"""
def process_response(self, request, response):
response['Date'] = http_date()
if not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
if if_none_match == response['ETag']:
# Setting the status is enough here. The response handling path
# automatically removes content for this status code (in
# http.conditional_content_removal()).
response.status_code = 304
if response.has_header('Last-Modified'):
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE', None)
if if_modified_since == response['Last-Modified']:
# Setting the status code is enough here (same reasons as
# above).
response.status_code = 304
return response
class SetRemoteAddrFromForwardedFor(object):
"""
Middleware that sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, if the
latter is set. This is useful if you're sitting behind a reverse proxy that
causes each request's REMOTE_ADDR to be set to 127.0.0.1.
Note that this does NOT validate HTTP_X_FORWARDED_FOR. If you're not behind
a reverse proxy that sets HTTP_X_FORWARDED_FOR automatically, do not use
this middleware. Anybody can spoof the value of HTTP_X_FORWARDED_FOR, and
because this sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, that means
anybody can "fake" their IP address. Only use this when you can absolutely
trust the value of HTTP_X_FORWARDED_FOR.
"""
def process_request(self, request):
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs. The
# client's IP will be the first one.
real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip
| Shrews/PyGerrit | webapp/django/middleware/http.py | Python | apache-2.0 | 2,469 |
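# Both classes follow Django's old-style middleware protocol
# (process_request/process_response) and are activated by listing them in
# settings. A hedged sketch for that era (Django 1.0.x; note that
# SetRemoteAddrFromForwardedFor was removed in later releases):
MIDDLEWARE_CLASSES = (
    'django.middleware.http.ConditionalGetMiddleware',
    'django.middleware.http.SetRemoteAddrFromForwardedFor',
)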
# TinyMCE editor dialog that works with pyjd as well as pyjs.
# add the following to the loader file:
# <script type="text/javascript"
# src="./tinymce/jscripts/tiny_mce/tiny_mce.js"></script>
#
# note: versions of tinymce from 3.0 to at least 3.5b1 have a bug
# where a 2nd editor instance in firefox will flicker, go blank,
# and lose keyboard focus.
#
from pyjamas.ui.TextArea import TextArea
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Button import Button
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.DialogWindow import DialogWindow
from pyjamas.ui import HasAlignment
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas import DeferredCommand
from pyjamas.Timer import Timer
from pyjamas import DOM
from __pyjamas__ import doc
_editor_id = 0
class EditDialogWindow(DialogWindow):
def __init__(self, app):
self.app = app
DialogWindow.__init__(
self, modal=False,
minimize=True, maximize=True, close=True,
)
self.closeButton = Button("Close", self)
self.saveButton = Button("Save", self)
self.setText("Sample DialogWindow with embedded image")
self.msg = HTML("", True)
global _editor_id
_editor_id += 1
editor_id = "editor%d" % _editor_id
#self.ht = HTML("", ID=editor_id)
self.txt = TextArea(Text="", VisibleLines=30, CharacterWidth=80,
ID=editor_id)
dock = DockPanel()
dock.setSpacing(4)
hp = HorizontalPanel(Spacing="5")
hp.add(self.saveButton)
hp.add(self.closeButton)
dock.add(hp, DockPanel.SOUTH)
dock.add(self.msg, DockPanel.NORTH)
dock.add(self.txt, DockPanel.CENTER)
dock.setCellHorizontalAlignment(hp, HasAlignment.ALIGN_RIGHT)
dock.setCellWidth(self.txt, "100%")
dock.setWidth("100%")
self.setWidget(dock)
self.editor_id = editor_id
self.editor_created = False
def add_tinymce(self):
# for storing the results when available
iframe = DOM.createElement("iframe")
DOM.setElemAttribute(iframe, "id", "__edit_%s" % self.editor_id)
DOM.setElemAttribute(iframe, "style", "display:none")
doc().body.appendChild(iframe)
# activate tinymce
new_script = DOM.createElement("script")
new_script.innerHTML = """
tinyMCE.init({
// General options
mode : "textareas",
theme : "simple",
// Theme options
theme_advanced_buttons1 : "bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,styleselect,formatselect,|,table,removeformat",
theme_advanced_buttons2 : "",
theme_advanced_buttons3 : "",
theme_advanced_buttons4 : "",
theme_advanced_toolbar_location : "top",
theme_advanced_toolbar_align : "left",
theme_advanced_statusbar_location : "bottom",
theme_advanced_resizing : true,
});
"""
ih = """
var ed = new tinymce.Editor('%s',{
mode : "none",
theme : "advanced",
plugins : "inlinepopups",
theme_advanced_buttons1 : "bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,formatselect,|,table,image,removeformat",
theme_advanced_buttons2 : "",
theme_advanced_buttons3 : "",
theme_advanced_buttons4 : "",
theme_advanced_toolbar_location : "top",
theme_advanced_toolbar_align : "left",
theme_advanced_statusbar_location : "bottom",
theme_advanced_resizing : true
});
ed.render();
ed.load();
tinymce.add(ed);
""" % self.editor_id
print new_script.innerHTML
DOM.setElemAttribute(new_script, "type", "text/javascript")
doc().body.appendChild(new_script)
def load(self, token, fname, data):
left = 50 # self.fDialogButton.getAbsoluteLeft() + 10
top = 50 # self.fDialogButton.getAbsoluteTop() + 10
self.setPopupPosition(left, top)
self.show()
self.token = token
self.fname = fname
self.msg.setHTML("<center>This is an example of a standard dialog box component.<br> You can put pretty much anything you like into it,<br>such as the following image '%s':</center>" % fname)
self.txt.setText(data)
if not self.editor_created:
self.editor_created = True
if self.fname.endswith(".html"):
Timer(1500, notify=self._load_tinymce)
def _load_tinymce(self, timer):
self.add_tinymce()
#self.load_tinymce()
def load_tinymce(self):
# activate tinymce
new_script = DOM.createElement("script")
new_script.innerHTML = """
var ed = tinyMCE.get('%s');
ed.init();
ed.render();
""" % self.editor_id
print new_script.innerHTML
DOM.setElemAttribute(new_script, "type","text/javascript")
doc().body.appendChild(new_script)
def transfer_tinymce(self):
new_script = DOM.createElement("script")
new_script.innerHTML = """
var ed = tinyMCE.get('%s');
var data = ed.getContent({'format': 'raw'});
frame = document.getElementById('__edit_%s');
frame.innerText = data;
ed.save();
ed.remove();
""" % (self.editor_id, self.editor_id)
self.editor_created = False
DOM.setElemAttribute(new_script, "type","text/javascript")
doc().body.appendChild(new_script)
self.hide()
t = Timer(notify=self)
t.scheduleRepeating(1000)
def onTimer(self, timer):
iframe = doc().getElementById("__edit_%s" % self.editor_id)
print dir(iframe)
txt = iframe.innerText
if not txt:
return
timer.cancel()
doc().body.removeChild(iframe)
self.app.save_page(self.token, self.fname, txt)
def onClick(self, sender):
if sender == self.saveButton:
if self.fname.endswith(".html"):
self.transfer_tinymce()
else:
txt = self.txt.getText()
self.app.save_page(self.token, self.fname, txt)
self.hide()
else:
self.hide()
| minghuascode/pyj | addons/TinyMCEditor.py | Python | apache-2.0 | 6,194 |
import pytest
# Write your own tests below!
# What logic would make sense in the tests below to ensure our data are "legal"?
# And how do we get access to the data in the other file?
def test_enforce_drinking_ages():
    assert False, 'You need to write this test!'
| Destaneon/python-fundamentals | challenges/04-Functions/test_D_your_own_test.py | Python | apache-2.0 | 270 |
def make_key(*args):
return "ti:" + ":".join(args)
def make_refset_key(pmid):
return make_key("article", pmid, "refset") | total-impact/biomed | db.py | Python | mit | 131 |
import unittest
from frisbee import *
class AnalysisTestCase(unittest.TestCase):
"""Tests all the analysis functions"""
def __get_cred_dict(self, catch, drop, throw, snatch, foul):
"""Returns a dictionary of player creds as expected from analysis"""
return dict(zip(["catch", "drop", "throw", "snatch", "foul"],
[catch, drop, throw, snatch, foul]))
def test_for_1player_drop(self):
"""Test for 1 player 1 drop"""
# 1 player drop
gs = "PL1*"
res = {"PL1" : self.__get_cred_dict(0,1,0,0,0)}
self.assertDictEqual(analyse_game_string(gs), res)
def test_for_2player_drop(self):
"""Test for 2 players 1 throw 1 catch and 1 drop"""
# 2 player drop
gs = "PL1-PL2*"
res = {"PL1" : self.__get_cred_dict(0,0,1,0,0),
"PL2" : self.__get_cred_dict(1,1,0,0,0)}
self.assertDictEqual(analyse_game_string(gs), res)
def test_for_throw_catch_drop(self):
"""Test for multiple passes and a drop"""
# Sucessful pass
gs = "PL1-PL2-PL3*"
res = {"PL1" : self.__get_cred_dict(0,0,1,0,0),
"PL2" : self.__get_cred_dict(1,0,1,0,0),
"PL3" : self.__get_cred_dict(1,1,0,0,0)}
self.assertDictEqual(analyse_game_string(gs), res)
def test_for_repeatitive_pass(self):
"""Test for with repeatitive pass to the same player"""
gs = "SPF1-SPF2-SPF1-SPF2-SPF1*"
res = {"SPF1" : self.__get_cred_dict(2,1,2,0,0),
"SPF2": self.__get_cred_dict(2,0,2,0,0)}
self.assertDictEqual(analyse_game_string(gs), res)
def test_for_snatch(self):
"""Test for accounting snatch with a sucessful pass"""
gs = "PL1(S)-PL2-PL3*"
res = {"PL1": self.__get_cred_dict(0,0,1,1,0),
"PL2": self.__get_cred_dict(1,0,1,0,0),
"PL3": self.__get_cred_dict(1,1,0,0,0)}
self.assertDictEqual(analyse_game_string(gs), res)
def test_for_snatch_drop(self):
"""Test for accounting snatch that was dropped"""
gs = "PL1(S)*"
res = {"PL1": self.__get_cred_dict(0,1,0,1,0)}
self.assertDictEqual(analyse_game_string(gs), res)
def test_for_foul(self):
"""Test for counting fouls"""
gs = "PL1-PL2-PL3(F)"
res = {"PL1": self.__get_cred_dict(0,0,1,0,0),
"PL2": self.__get_cred_dict(1,0,1,0,0),
"PL3": self.__get_cred_dict(1,0,0,0,1)}
self.assertDictEqual(analyse_game_string(gs), res)
def test_for_ignore_points(self):
"""Test for the player analysis to ignore the points"""
gs = "PL1-PL2(P)"
res = {"PL1" : self.__get_cred_dict(0,0,1,0,0),
"PL2" : self.__get_cred_dict(1,0,0,0,0)}
self.assertDictEqual(analyse_game_string(gs), res)
def test_for_realworld(self):
"""Tests a real world multiline string"""
gs = 'DHA-RIY*\nRIY-DHA*\nDHA-RIY\nDHA-RIY-SHE*\nDHA-RIY-SHE-DHA*\nDHA-SHE-RIY*\nDHA*\n'
res = {"DHA": self.__get_cred_dict(2,3,5,0,0),
"RIY": self.__get_cred_dict(5,2,4,0,0),
"SHE": self.__get_cred_dict(3,1,2,0,0),
}
self.assertDictEqual(analyse_game_string(gs), res)
class PointsTestCase(unittest.TestCase):
"""Check whether the points are calculated correctly"""
def test_point_count(self):
"""Test for get_points()"""
gs = 'ABS-DSA-POT(P)\nADS-HSD*\nHJS-AUD-HSD*\nPSD-ASD(P)\n'
self.assertEqual(get_points(gs), 2)
class ParserTestCase(unittest.TestCase):
"""Test for parse_game_file() in frisbee.py """
def test_is_file_empty(self):
"""Tests whether the function throws an error if the file has no data"""
with self.assertRaises(ParsingError):
parse_gamefile("test/data/empty_game.txt")
def test_is_file_corrupt(self):
"""Tests whether the fucntion throws an error for insufficient data"""
with self.assertRaises(ParsingError):
parse_gamefile("test/data/1team_game.txt")
def test_is_parsing_correct(self):
"""Tests whether the function returns the expected result after parsing"""
resp = parse_gamefile("test/data/full_game.txt")
data = {"team1" : "TEAM A", "team2" : "TEAM B",
"points1" : 0, "points2" : 1,
"string1" : "JUS*\nJUS-KAJ*\nSUR*\n",
"string2" : "DAN-NIM*\nDAN-NIM(P)\n"
}
self.assertDictEqual(resp, data)
if __name__ == "__main__": # pragma: no cover
unittest.main()
| tecoholic/frisbee | test/test_frisbee.py | Python | mit | 4,594 |
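# The tests above encode a small game grammar: '-' separates throws, '*' marks
# a drop, and '(S)', '(F)', '(P)' tag snatches, fouls and points. A minimal
# sketch consistent with PointsTestCase (count_points is a hypothetical
# stand-in for frisbee.get_points, not the real implementation):
def count_points(game_string):
    # one possession per line; a point is recorded as a trailing "(P)"
    return sum(line.count("(P)") for line in game_string.splitlines())

print(count_points('ABS-DSA-POT(P)\nADS-HSD*\nHJS-AUD-HSD*\nPSD-ASD(P)\n'))  # 2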
# -*- encoding: utf-8 -*-
import pooler, time, base64
from osv import fields, osv
AVAILABLE_PRIORITIES = [
    ('1', 'Highest'),
    ('2', 'High'),
    ('3', 'Medium'),
    ('4', 'Low'),
    ('5', 'Lowest'),
]
class fg_jobcontent(osv.osv):
_name = "fg_jobcontent"
_description = "工作项目进度表"
_columns = {
'name': fields.char('项目名称', size=128, select=True, required=True,),
"executor": fields.many2one('res.users','执行人', required=True, select=True,),
'charge':fields.char('下单人', size=128,),
"date_start":fields.date("下单时间",required=False,),
"date_end":fields.date("实际完成时间",),
'note': fields.text('备注',size=512),
"accept": fields.char('对接人',size=128,),
"explain":fields.text('说明(要求)',),
"rate":fields.text('工作进度',),
"end_time": fields.date('要求完成时间',),
'jobstate': fields.selection([('draft', '未开始'),('processing','进行中'),('done','已完成')], '状态',),
}
_defaults = {
'date_start':fields.date.context_today,
'executor':lambda obj, cr, uid, context: uid,
'jobstate':'draft',
}
class task(osv.osv):
_name = "fg_project.task"
_description = "产品物料推进任务"
    def _get_img(self, cr, uid, ids, name, arg, context=None):
        # function field: build a {record_id: photo} mapping for every id,
        # resolving user -> resource -> employee photo
        res = {}
        for record in self.read(cr, uid, ids, ['executor']):
            image = None
            cr.execute('SELECT id FROM resource_resource WHERE user_id = %s', (record['executor'][0],))
            reid = cr.fetchone()
            if reid:
                cr.execute('SELECT photo FROM hr_employee WHERE resource_id = %s', (reid[0],))
                image = cr.fetchone()
                if image:
                    image = image[0]
            res[record['id']] = image
        return res
_columns = {
        'name': fields.char('Task', select=True, required=True, size=128),
        "project": fields.many2one('fg_schedule.project', 'Project', select=True, required=True,),
        "executor": fields.many2one('res.users', 'Executor', required=True, select=True),
        "executor_img": fields.function(_get_img, method=True, string='Avatar', type='binary', store=True,),
        "order": fields.char('Requested by', size=128, select=True),
        "order_time": fields.date('Request date',),
        'detil': fields.text('Work summary', size=512,),
        "need_endtime": fields.date('Deadline'),
        "end_time": fields.date('Completion date', readonly=True,),
        "accept": fields.char('Contact person', size=128,),
        'state': fields.selection([('draft', 'Not started'), ('processing', 'In progress'), ('cancelled', 'Cancelled (deleted)'), ('done', 'Done')], 'Progress status', required=True),
        'stated': fields.selection([('draft', 'Not started'), ('processing', 'In progress'), ('cancelled', 'Cancelled (deleted)'), ('done', 'Done')], 'Progress status', required=True, readonly=True),
        "explain": fields.text('Project description (requirements)',),
        "rate": fields.float('Project progress',),
        "note": fields.text('Notes',),
        'img': fields.binary("Preview", readonly=True,),
        #-------------------------------------------------------
        'colour': fields.one2many('product.colour', 'colour_schedule', 'Product colours'),
        'barcode': fields.one2many('bar.code', 'barcode', 'Barcode application'),
        'productbook': fields.one2many('product.book', 'book_schedule', 'Product manual'),
        'colorboard': fields.one2many('color.board', 'colorboard', 'Colour board approval'),
        'accessorypurchaser': fields.one2many('accessory.purchaser', 'accessorypurchaser', 'Accessory purchasing'),
        'screenmaking': fields.one2many('screen.making', 'screenmaking', 'Screen making'),
        'productsample': fields.one2many('product.sample', 'productsample', 'Product sampling'),
        'productpack': fields.one2many('product.pack', 'productpack', 'Product packaging'),
        'productshoot': fields.one2many('product.shoot', 'productshoot', 'Product photography and retouching'),
        'producttag': fields.one2many('product.tag', 'producttag', 'Hang tags, insert cards, labels'),
        'productopp': fields.one2many('product.opp', 'productopp', 'OPP bags, LDPE bags'),
        'productcontainer': fields.one2many('product.container', 'productcontainer', 'Product cartons'),
        'productposter': fields.one2many('product.poster', 'productposter', 'Posters'),
        'productelse': fields.one2many('product.else', 'productelse', 'Other'),
    }
_sql_constraints = [
        ('name', 'unique (name)', u'This task already exists!'),
]
_defaults = {
'need_endtime':fields.date.context_today,
'order_time':fields.date.context_today,
'state':'draft',
'stated':'draft',
'rate':0.00,
'executor':lambda obj, cr, uid, context: uid,
}
def change_stage(self, cr, uid, ids, *args):
state = self.read(cr, uid, ids, ["stated"], context=None)
if not state:self.write(cr, uid, ids, {'state': 'draft','stated':'draft','end_time':False, 'rate':0})
if state[0]["stated"] == 'draft':self.write(cr, uid, ids, {'state': 'processing','stated':'processing','end_time':False, 'rate':0})
elif state[0]["stated"] == 'processing':self.write(cr, uid, ids, {'state': 'done','stated':'done','end_time':time.strftime("%Y-%m-%d %H:%M:%S"),'rate':100})
elif state[0]["stated"] == 'done':self.write(cr, uid, ids, {'state': 'draft','stated':'draft','end_time':False, 'rate':0})
return True
def case_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft','stated':'draft','end_time':False, 'rate':0})
return True
def case_cancelled(self, cr, uid, ids, *args):
self.unlink(cr, uid, ids)
return True
def case_processing(self, cr, uid, ids, *args):
self.write(cr, uid, ids,{'state': 'processing','stated':'processing','end_time':False, 'rate':0})
return True
def case_done(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'done','stated':'done','end_time':time.strftime("%Y-%m-%d %H:%M:%S"),'rate':100})
return True
def button(self, cr, uid, ids, *args):
return True
task()
class project(osv.osv):
_name = "fg_schedule.project"
_description = "配置项目"
def create(self, cr, uid, vals, context={}):
result = super(project, self).create(cr, uid, vals, context=context)
obj = self.pool.get('fg_project.task')
        obj.create(cr, uid, {'name':'Product colours','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Barcode application','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Product manual','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Colour board approval','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Accessory purchasing','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Screen making','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Product sampling','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Product packaging','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Product photography and retouching','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Hang tags, insert cards, labels','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'OPP bags, LDPE bags','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Product cartons','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Posters','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
        obj.create(cr, uid, {'name':'Other','project':result,'executor':uid,'state':'draft','stated':'draft','need_endtime':None}, context=context)
return result
_columns = {
        'name': fields.char('Product name', size=128, select=True, required=True, readonly=True, states={'draft':[('readonly',False)]}),
        'to': fields.char('R&D contact', size=128, select=True, readonly=True, states={'draft':[('readonly',False)]}),
        'charge': fields.char('Project manager', size=128, readonly=True, states={'draft':[('readonly',False)]}),
        "date_start": fields.date("Start date", required=True, readonly=True, states={'draft':[('readonly',False)]}),
        "date_end": fields.date("Completion date", readonly=True, states={'draft':[('readonly',False)]}),
        'note': fields.char('Description', readonly=True, states={'draft':[('readonly',False)]}, size=512),
        'img': fields.binary("Product image", readonly=True, states={'draft':[('readonly',False)]}),
        'state': fields.selection([('draft', 'Open'), ('done', 'Closed')], 'Project status',),
}
_defaults = {
'date_start':fields.date.context_today,
'state':lambda *a:'draft',
}
_order = "date_start desc"
_sql_constraints = [
        ('name', 'unique (name)', u'This product name already exists!'),
]
def case_done(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'done','date_end':time.strftime("%Y-%m-%d %H:%M:%S")})
return True
project()
class conf_task(osv.osv):
_name = "conf_task"
_description = "配置任务名称"
_columns = {
        'name': fields.char('Task', size=128, select=True,),
'priority': fields.selection(AVAILABLE_PRIORITIES, 'Priority', select=True),
}
conf_task()
| Johnzero/erp | openerp/addons/fg_schedule/fg_schedule.py | Python | agpl-3.0 | 10,893 |
# Copyright © 2017-2019 Zuercher Hochschule fuer Angewandte Wissenschaften.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import connexion
from adapters.datasource import STORE
from adapters.resources import RM
from esm.controllers import _version_ok
from esm.models.binding_request import BindingRequest
from esm.models.binding_response import BindingResponse
from esm.models.empty import Empty
from esm.models.last_operation import LastOperation
from esm.models.service_instance import ServiceInstance
from esm.models.service_request import ServiceRequest
from esm.models.service_response import ServiceResponse
from esm.models.service_type import ServiceType
from esm.models.update_operation_response import UpdateOperationResponse
from esm.models.update_request import UpdateRequest
# TODO need a converged state model for the SM - see info methods of backend
def create_service_instance(instance_id, service, accept_incomplete=None):
"""
Provisions a service instance
When the broker receives a provision request from a client, it should synchronously take whatever action is
necessary to create a new service resource for the developer. The result of provisioning varies by service
    type, although there are a few common actions that work for many services. Supports asynchronous operations.
:param instance_id: 'The instance_id of a service instance is provided by the client. This ID will be used for
future requests (bind and deprovision), so the broker must use it to correlate the resource it creates.'
:type instance_id: str
:param service: Service information.
:type service: dict | bytes
:param accept_incomplete: Indicates that the client is supporting asynchronous operations
:type accept_incomplete: bool
:rtype: ServiceResponse
"""
ok, message, code = _version_ok()
if not ok:
return message, code
else:
if connexion.request.is_json:
service = ServiceRequest.from_dict(connexion.request.get_json())
else:
return "Supplied body content is not or is mal-formed JSON", 400
        existing = STORE.get_service_instance(instance_id)
        if len(existing) == 1:
            return 'Service instance with id {id} already exists'.format(id=instance_id), 409
# look up manifest based on plan id
# based on the manifest type, select the driver
# send the manifest for creation to the target system
# store the ID along with refs to service, plan and manifest
# get the manifest for the service/plan
# TODO some validation required here to ensure it's the right svc/plan
svc_type = STORE.get_service(service.service_id)[0]
if svc_type is None:
return 'Unrecognised service requested to be instantiated', 404
plans = svc_type.plans
plan = [p for p in plans if p.id == service.plan_id]
if len(plan) <= 0:
return 'no plan found.', 404
mani = STORE.get_manifest(plan_id=plan[0].id)
if len(mani) <= 0:
return 'no manifest for service {plan} found.'.format(plan=service.plan_id), 404
mani = mani[0]
if accept_incomplete: # given docker-compose runs in detached mode this is not needed - only timing can verify
# XXX put this in a thread to allow for asynch processing?
RM.create(instance_id=instance_id, content=mani.manifest_content,
c_type=mani.manifest_type, parameters=service.parameters)
else:
RM.create(instance_id=instance_id, content=mani.manifest_content,
c_type=mani.manifest_type, parameters=service.parameters)
last_op = LastOperation( # stored within the service instance doc
state='creating',
description='service instance is being created'
)
# store the instance Id with manifest id
srv_inst = ServiceInstance(
service_type=svc_type,
state=last_op,
context={
'id': instance_id,
'manifest_id': mani.id,
}
)
STORE.add_service_instance(srv_inst)
if accept_incomplete:
STORE.add_last_operation(instance_id=instance_id, last_operation=last_op)
return 'created', 200
def deprovision_service_instance(instance_id, service_id, plan_id, accept_incomplete=None):
"""
Deprovisions a service instance.
'When a broker receives a deprovision request from a client, it should delete any resources it
created during the provision. Usually this means that all resources are immediately reclaimed for
future provisions.'
:param instance_id: 'The instance_id of a service instance is provided by the client. This ID will be used
for future requests (bind and deprovision), so the broker must use it to correlate the resource it creates.'
:type instance_id: str
:param service_id: service ID to be deprovisioned
:type service_id: str
:param plan_id: plan ID of the service to be deprovisioned
:type plan_id: str
:param accept_incomplete: Indicates that the client is supporting asynchronous operations
:type accept_incomplete: bool
:rtype: UpdateOperationResponse
"""
ok, message, code = _version_ok()
if not ok:
return message, code
else:
# XXX if there's bindings remove first?
# XXX what about undo?
# check that the instance exists first
instance = STORE.get_service_instance(instance_id=instance_id)
if len(instance) == 1:
mani_id = instance[0].context['manifest_id']
mani = STORE.get_manifest(manifest_id=mani_id)
if len(mani) < 1:
return 'no service manifest found.', 404
RM.delete(instance_id=instance_id, manifest_type=mani[0].manifest_type)
STORE.delete_service_instance(instance_id)
# we don't delete the last_operation explicitly as its embedded in the service_instance document
# STORE.delete_last_operation(instance_id)
return Empty(), 200
else:
return Empty(), 404
def _get_instance(srv_inst):
# get the latest info
mani_id = srv_inst.context['manifest_id']
mani = STORE.get_manifest(manifest_id=mani_id)
if len(mani) < 1:
return 'no manifest found.', 404
# Get the latest info of the instance
# could also use STORE.get_service_instance(srv_inst) but will not have all details
inst_info = RM.info(instance_id=srv_inst.context['id'], manifest_type=mani[0].manifest_type)
if inst_info['srv_inst.state.state'] == 'failed':
# try epm.delete(instance_id=instance_id)?
return 'There has been a failure in creating the service instance.', 500
srv_inst.state.state = inst_info['srv_inst.state.state']
srv_inst.state.description = inst_info['srv_inst.state.description']
# don't need you any more, buh-bye!
del inst_info['srv_inst.state.state']
del inst_info['srv_inst.state.description']
# merge the two context dicts
srv_inst.context = {**srv_inst.context, **inst_info}
# update the service instance record - there should be an asynch method doing the update - event based
STORE.add_service_instance(srv_inst)
return srv_inst
def instance_info(instance_id):
"""
Returns information about the service instance.
Returns information about the service instance. This is a simple read operation against the broker database and
    is provided as a developer/consumer convenience.
:param instance_id: 'The instance_id of a service instance is provided by the client. This ID will be used
for future requests (bind and deprovision), so the broker must use it to correlate the resource it creates.'
:type instance_id: str
:rtype: ServiceInstance
"""
ok, message, code = _version_ok()
if not ok:
return message, code
else:
# service instance should already be recorded
srv_inst = STORE.get_service_instance(instance_id)
if len(srv_inst) < 1:
return 'no service instance found.', 404
srv_inst = srv_inst[0]
srv_inst = _get_instance(srv_inst)
return srv_inst, 200
def all_instance_info():
"""
Returns information about the service instance.
Returns all service instances that are accessible to the end-user on this service manager.
:rtype: List[ServiceInstance]
"""
ok, message, code = _version_ok()
if not ok:
return message, code
else:
instances = STORE.get_service_instance()
insts = list()
for inst in instances:
insts.append(_get_instance(inst))
return insts, 200
def last_operation_status(instance_id, service_id=None, plan_id=None, operation=None):
"""
Gets the current state of the last operation upon the specified resource.
When a broker returns status code 202 ACCEPTED for provision, update, or deprovision, the client will
begin to poll the /v2/service_instances/:guid/last_operation endpoint to obtain the state of the last requested
operation. The broker response must contain the field state and an optional field description.
:param instance_id: The instance_id of a service instance is provided by the client. This ID will be used for
future requests (bind and deprovision), so the broker must use it to correlate the resource it creates.
:type instance_id: str
:param service_id: ID of the service from the catalog.
:type service_id: str
:param plan_id: ID of the plan from the catalog.
:type plan_id: str
:param operation: A broker-provided identifier for the operation. When a value for operation is included
with asynchronous responses for Provision, Update, and Deprovision requests, the broker client should provide
    the same value using this query parameter as a URL-encoded string.
:type operation: str
:rtype: LastOperation
"""
ok, message, code = _version_ok()
if not ok:
return message, code
else:
        # just re-use the method and return its content and HTTP status code.
# version check not required here as it's done in the proxied call
srv_inst, code = instance_info(instance_id=instance_id)
if code == 404:
            return srv_inst + ' No service status available.', code
else:
return srv_inst.state, code
def service_bind(instance_id, binding_id, binding):
"""
Binds to a service
When the broker receives a bind request from the client, it should return information which helps an application
to utilize the provisioned resource. This information is generically referred to as credentials. Applications
should be issued unique credentials whenever possible, so one application access can be revoked without affecting
other bound applications.
:param instance_id: The instance_id of a service instance is provided by the client. This ID will be used
for future requests (bind and deprovision), so the broker must use it to correlate the resource it creates.
:type instance_id: str
:param binding_id: The binding_id of a service binding is provided by the Cloud Controller.
:type binding_id: str
:param binding:
:type binding: dict | bytes
:rtype: BindingResponse
"""
ok, message, code = _version_ok()
if not ok:
return message, code
else:
if connexion.request.is_json:
binding = BindingRequest.from_dict(connexion.request.get_json())
else:
return "Supplied body content is not or is mal-formed JSON", 400
return 'Not implemented :-(', 501
def service_unbind(instance_id, binding_id, service_id, plan_id):
"""
Unbinds a service
When a broker receives an unbind request from the client, it should delete any resources it created in bind.
Usually this means that an application immediately cannot access the resource.
:param instance_id: The instance_id of a service instance is provided by the client. This ID will be used
for future requests (bind and deprovision), so the broker must use it to correlate the resource it creates.
:type instance_id: str
:param binding_id: The binding_id of a service binding is provided by the Cloud Controller.
:type binding_id: str
:param service_id: ID of the service from the catalog.
:type service_id: str
:param plan_id: ID of the plan from the catalog.
:type plan_id: str
:rtype: Empty
"""
ok, message, code = _version_ok()
if not ok:
return message, code
else:
return 'Not implemented :-(', 501
def update_service_instance(instance_id, plan, accept_incomplete=None):
"""
Updating a Service Instance
Brokers that implement this endpoint can enable users to modify attributes of an existing service instance.
    The first attribute that users can modify is the service plan. This effectively enables users to upgrade
    or downgrade their service instance to other plans.'
:param instance_id: The instance_id of a service instance is provided by the client. This ID will be used
for future requests (bind and deprovision), so the broker must use it to correlate the resource it creates.
:type instance_id: str
:param plan: New Plan information.
:type plan: dict | bytes
    :param accept_incomplete: Indicates that the client supports asynchronous operations
:type accept_incomplete: bool
:rtype: Empty
"""
ok, message, code = _version_ok()
if not ok:
return message, code
else:
if connexion.request.is_json:
plan = UpdateRequest.from_dict(connexion.request.get_json())
else:
return "Supplied body content is not or is mal-formed JSON", 400
return 'Not implemented :-(', 501
| EduJGURJC/elastest-service-manager | src/esm/controllers/service_instances_controller.py | Python | apache-2.0 | 14,612 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests Hamming metrics."""
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow_addons.metrics import HammingLoss, hamming_distance
def test_config():
hl_obj = HammingLoss(mode="multilabel", threshold=0.8)
assert hl_obj.name == "hamming_loss"
assert hl_obj.dtype == tf.float32
def check_results(obj, value):
np.testing.assert_allclose(value, obj.result().numpy(), atol=1e-5)
def test_mc_4_classes():
actuals = tf.constant(
[
[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 1, 0],
],
dtype=tf.float32,
)
predictions = tf.constant(
[
[0.85, 0.12, 0.03, 0],
[0, 0, 1, 0],
[0.10, 0.045, 0.045, 0.81],
[1, 0, 0, 0],
[0.80, 0.10, 0.10, 0],
[1, 0, 0, 0],
[0.05, 0, 0.90, 0.05],
],
dtype=tf.float32,
)
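    # rows 3 and 4 (0-indexed) are misclassified, so the expected loss is 2/7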
# Initialize
hl_obj = HammingLoss("multiclass", threshold=0.8)
hl_obj.update_state(actuals, predictions)
# Check results
check_results(hl_obj, 0.2857143)
def test_mc_5_classes():
actuals = tf.constant(
[
[1, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
],
dtype=tf.float32,
)
predictions = tf.constant(
[
[0.85, 0, 0.15, 0, 0],
[0, 0, 0, 1, 0],
[0, 1, 0, 0, 0],
[0.05, 0.90, 0.04, 0, 0.01],
[0.10, 0, 0.81, 0.09, 0],
[0.10, 0.045, 0, 0.81, 0.045],
[1, 0, 0, 0, 0],
[0, 0.85, 0, 0, 0.15],
],
dtype=tf.float32,
)
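    # rows 2 and 5 (0-indexed) are misclassified, so the expected loss is 2/8 = 0.25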
# Initialize
hl_obj = HammingLoss("multiclass", threshold=0.8)
hl_obj.update_state(actuals, predictions)
# Check results
check_results(hl_obj, 0.25)
def test_ml_4_classes():
actuals = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 1]], dtype=tf.float32)
predictions = tf.constant(
[[0.97, 0.56, 0.83, 0.77], [0.34, 0.95, 0.7, 0.89], [0.95, 0.45, 0.23, 0.56]],
dtype=tf.float32,
)
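    # only the last row is wrong, in 2 of its 4 labels: 2/12 ~= 0.1667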
# Initialize
hl_obj = HammingLoss("multilabel", threshold=0.8)
hl_obj.update_state(actuals, predictions)
# Check results
check_results(hl_obj, 0.16666667)
def test_ml_5_classes():
actuals = tf.constant(
[
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[1, 0, 0, 0, 1],
[0, 1, 1, 0, 0],
],
dtype=tf.float32,
)
predictions = tf.constant(
[
[1, 0.75, 0.2, 0.55, 0],
[0.65, 0.22, 0.97, 0.88, 0],
[0, 1, 0, 1, 0],
[0, 0.85, 0.9, 0.34, 0.5],
[0.4, 0.65, 0.87, 0, 0.12],
[0.66, 0.55, 1, 0.98, 0],
[0.95, 0.34, 0.67, 0.65, 0.10],
[0.45, 0.97, 0.89, 0.67, 0.46],
],
dtype=tf.float32,
)
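    # thresholding at 0.7 flips 3 of the 40 label slots, giving 3/40 = 0.075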
# Initialize
hl_obj = HammingLoss("multilabel", threshold=0.7)
hl_obj.update_state(actuals, predictions)
# Check results
check_results(hl_obj, 0.075)
def test_hamming_distance():
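    # hamming_distance is the fraction of positions where the label vectors
    # disagree; these two differ in 3 of 10 entries, hence 0.3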
actuals = tf.constant([1, 1, 0, 0, 1, 0, 1, 0, 0, 1], dtype=tf.int32)
predictions = tf.constant([1, 0, 0, 0, 1, 0, 0, 1, 0, 1], dtype=tf.int32)
test_result = hamming_distance(actuals, predictions)
np.testing.assert_allclose(0.3, test_result, atol=1e-5)
# Keras model check
def test_keras_model():
model = tf.keras.Sequential()
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(3, activation="softmax"))
h1 = HammingLoss(mode="multiclass")
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=[h1])
data = np.random.random((100, 10))
labels = np.random.random((100, 3))
model.fit(data, labels, epochs=1, batch_size=32, verbose=0)
| tensorflow/addons | tensorflow_addons/metrics/tests/hamming_test.py | Python | apache-2.0 | 4,870 |
# Case Conductor is a Test Case Management system.
# Copyright (C) 2011 uTest Inc.
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import patterns, url, include
from django.conf import settings
urlpatterns = patterns(
"",
url("^account/", include("ccui.users.urls")),
# run tests
url("^$", "ccui.testexecution.views.home", name="runtests"),
url("^environment/(?P<testrun_id>\d+)/$",
"ccui.environments.views.set_environment",
name="runtests_environment"),
url("^run/(?P<testrun_id>\d+)/$",
"ccui.testexecution.views.runtests",
name="runtests_run"),
# runtests ajax
url("^runtests/_finder/environments/(?P<run_id>\d+)/",
"ccui.testexecution.views.finder_environments",
name="runtests_finder_environments"),
url("^_result/(?P<result_id>\d+)/$",
"ccui.testexecution.views.result",
name="result"),
# manage
url("^manage/", include("ccui.manage.urls")),
# results
url("^results/", include("ccui.results.urls")),
)
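# debug-only URLs: serve media through Django and expose the debug views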
if settings.DEBUG:
urlpatterns += patterns(
"",
url("^debug/", include("ccui.debug.urls")),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
)
| mozilla/caseconductor-ui | ccui/urls.py | Python | gpl-3.0 | 1,967 |
from django import VERSION
import os
from django.contrib.admin import ModelAdmin, helpers
from django.contrib.admin.util import unquote
from django.conf.urls import patterns, url
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy as lazy
from django.utils.html import escape
from django.forms.models import model_to_dict
from django.forms.formsets import all_valid
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.db.models.fields.files import FieldFile
from django.db import models
from django.utils.datastructures import MultiValueDict
from django.core.files.base import ContentFile
__all__ = 'ClonableModelAdmin',
class ClonableModelAdmin(ModelAdmin):
clone_verbose_name = _('Duplicate')
change_form_template = 'admin/modelclone/change_form.html'
def clone_link(self, clonable_model):
'''
Method to be used on `list_display`, renders a link to clone model
'''
_url = reverse(
'admin:{0}_{1}_clone'.format(
clonable_model._meta.app_label,
getattr(clonable_model._meta, 'module_name', getattr(clonable_model._meta, 'model_name', ''))),
args=(clonable_model._get_pk_val(),),
current_app=self.admin_site.name
)
return '<a href="{0}">{1}</a>'.format(_url, self.clone_verbose_name)
clone_link.short_description = clone_verbose_name # not overridable by subclass
clone_link.allow_tags = True
def get_urls(self):
url_name = '{0}_{1}_clone'.format(
self.model._meta.app_label,
getattr(self.model._meta, 'module_name', getattr(self.model._meta, 'model_name', '')))
new_urlpatterns = patterns('',
url(r'^(.+)/clone/$',
self.admin_site.admin_view(self.clone_view),
name=url_name)
)
original_urlpatterns = super(ClonableModelAdmin, self).get_urls()
return new_urlpatterns + original_urlpatterns
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context.update({
'clone_verbose_name': self.clone_verbose_name,
'include_clone_link': True,
})
return super(ClonableModelAdmin, self).change_view(request, object_id, form_url, extra_context)
def clone_view(self, request, object_id, form_url='', extra_context=None):
opts = self.model._meta
if not self.has_add_permission(request):
raise PermissionDenied
original_obj = self.get_object(request, unquote(object_id))
if original_obj is None:
raise Http404(_('{name} object with primary key {key} does not exist.'.format(
name=force_text(opts.verbose_name),
key=repr(escape(object_id))
)))
ModelForm = self.get_form(request)
formsets = []
        # NOTE: Django 1.5 has a second argument on get_inline_instances()
inline_instances = self.get_inline_instances(request)
if request.method == 'POST':
files_data = MultiValueDict()
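            # copy every FileField from the original object (and its related
            # objects below) into the synthetic upload payload so the clone
            # receives fresh copies of the uploaded files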
for field in [f for f in original_obj._meta.fields if isinstance(f, models.FileField)]:
f = getattr(original_obj, field.name)
if f:
tmp_file = ContentFile(f.read(), os.path.basename(f.name))
files_data.update(**{ field.name: tmp_file })
for rf in [rel.field for rel in original_obj._meta.get_all_related_objects()
if isinstance(rel.field, models.ForeignKey) and rel.field.rel.related_name]:
related_name = rf.rel.related_name
related = getattr(original_obj, related_name)
i = 0
for obj in related.all():
for field in [f for f in obj._meta.fields if isinstance(f, models.FileField)]:
f = getattr(obj, field.name)
if f:
tmp_file = ContentFile(f.read(), os.path.basename(f.name))
files_data.update(**{ u'%s-%s-%s' % (related_name, i, field.name): tmp_file })
i += 1
files_data.update(request.FILES)
form = ModelForm(request.POST, files_data)
if form.is_valid():
new_object = self.save_form(request, form, change=False)
form_validated = True
else:
new_object = self.model()
form_validated = False
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request), inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(data=request.POST, files=files_data,
instance=new_object,
save_as_new="_saveasnew" in request.POST, # ????
prefix=prefix)
formsets.append(formset)
if all_valid(formsets) and form_validated:
# if original model has any file field, save new model
# with same paths to these files
for name in vars(original_obj):
field = getattr(original_obj, name)
if isinstance(field, FieldFile) and name not in request.FILES:
setattr(new_object, name, field)
self.save_model(request, new_object, form, False)
self.save_related(request, form, formsets, False)
self.log_addition(request, new_object)
if VERSION[1] <= 4:
# Until Django 1.4 giving %s in the url would be replaced with
# object primary key.
# I can't use the default because it goes back only one level
# ('../%s/') and now we are under clone url, so we need one more level
post_url_continue = '../../%s/'
else:
# Since 1.5 '%s' was deprecated and if None is given reverse() will
# be used and do the right thing
post_url_continue = None
return self.response_add(request, new_object, post_url_continue)
else:
initial = model_to_dict(original_obj)
initial = self.tweak_cloned_fields(initial)
form = ModelForm(initial=initial)
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request), inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
initial = []
# Django 1.8 Patch
if hasattr(inline, 'queryset'):
get_queryset = inline.queryset
else:
get_queryset = inline.get_queryset
queryset = get_queryset(request).filter(
**{FormSet.fk.name: original_obj})
for obj in queryset:
initial.append(model_to_dict(obj, exclude=[obj._meta.pk.name,
FormSet.fk.name]))
initial = self.tweak_cloned_inline_fields(prefix, initial)
formset = FormSet(prefix=prefix, initial=initial)
# Since there is no way to customize the `extra` in the constructor,
# construct the forms again...
# most of this view is a hack, but this is the ugliest one
formset.extra = len(initial) + formset.extra
# _construct_forms() was removed on django 1.6
# see https://github.com/django/django/commit/ef79582e8630cb3c119caed52130c9671188addd
if hasattr(formset, '_construct_forms'):
formset._construct_forms()
formsets.append(formset)
admin_form = helpers.AdminForm(
form,
list(self.get_fieldsets(request)),
self.get_prepopulated_fields(request),
self.get_readonly_fields(request),
model_admin=self
)
media = self.media + admin_form.media
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, original_obj))
readonly = list(inline.get_readonly_fields(request, original_obj))
prepopulated = dict(inline.get_prepopulated_fields(request, original_obj))
inline_admin_formset = InlineAdminFormSetFakeOriginal(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
title = u'{0} {1}'.format(self.clone_verbose_name, opts.verbose_name)
context = {
'title': title,
'original': title,
'adminform': admin_form,
'is_popup': "_popup" in request.REQUEST,
'show_delete': False,
'media': media,
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request,
context,
form_url=form_url,
change=False
)
def tweak_cloned_fields(self, fields):
"""Override this method to tweak a cloned object before displaying its form.
``fields`` is a dictionary containing the cloned object's field data (the result of
``model_to_dict()``).
It does *not* contain inline fields. To tweak inline fields, override
``tweak_cloned_inline_fields()``.
This method returns the modified ``fields``.
"""
return fields
def tweak_cloned_inline_fields(self, related_name, fields_list):
"""Override this method to tweak a cloned inline before displaying its form.
        ``related_name`` is the name of the relation being inlined. Note that if you've inlined the
same relation more than once, ``related_name`` will have a numerical suffix, for example,
``comment_set-2``.
``fields_list`` is a list of dictionaries containing the inline field data (the result of
``model_to_dict()`` for each inlined row).
This method returns the modified ``fields_list``.
"""
return fields_list
class InlineAdminFormSetFakeOriginal(helpers.InlineAdminFormSet):
def __iter__(self):
# the template requires the AdminInlineForm to have an `original`
# attribute, which is the model instance, in order to display the
# 'Delete' checkbox
# we don't have `original` because we are just providing initial
# data to the form, so we attach a "fake original" (something that
        # evaluates to True) to fool the template and make it display
# the 'Delete' checkbox
# needless to say this is a terrible hack and will break in future
# django versions :)
for inline_form in super(InlineAdminFormSetFakeOriginal, self).__iter__():
if inline_form.form.initial:
inline_form.original = True
yield inline_form | Excentrics/publication-backbone | publication_backbone/admin/modelcloneadmin.py | Python | bsd-3-clause | 11,957 |
#!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#osl_concat
command += testshade("-t 1 -g 64 64 str_concat -od uint8 -o res concat_ref.tif -o res_m concat_m_ref.tif")
#osl_stoi
command += testshade("-t 1 -g 64 64 str_stoi -od uint8 -o res stoi_ref.tif -o res_m stoi_m_ref.tif")
#osl_endswith
command += testshade("-t 1 -g 64 64 str_endswith -od uint8 -o res_t endswith_t_ref.tif -o res_f endswith_f_ref.tif"
" -o res_t_m endswith_t_m_ref.tif -o res_f_m endswith_f_m_ref.tif")
#osl_getchar
command += testshade("-t 1 -g 64 64 str_getchar -od uint8 str_getchar -o res_t1 getchar_t1_ref.tif -o res_t2 getchar_t2_ref.tif"
" -o res_f1 getchar_f1_ref.tif -o res_f2 getchar_f2_ref.tif"
" -o res_t1_m getchar_t1_m_ref.tif -o res_t2_m getchar_t2_m_ref.tif"
" -o res_f1_m getchar_f1_m_ref.tif -o res_f2_m getchar_f2_m_ref.tif")
#osl_hash
command += testshade("-t 1 -g 64 64 str_hash -od uint8 -o res hash_ref.tif -o res_m hash_m_ref.tif")
#osl_startswith
command += testshade("-t 1 -g 64 64 str_startswith -od uint8 -o res_t startswith_t_ref.tif -o res_f startswith_f_ref.tif"
" -o res_t_m startswith_t_m_ref.tif -o res_f_m startswith_f_m_ref.tif")
#osl_stof
command += testshade("-t 1 -g 64 64 str_stof -od uint8 -o res stof_ref.tif -o res_m stof_m_ref.tif")
#osl_strlen
command += testshade("-t 1 -g 64 64 str_strlen -od uint8 -o res strlen_ref.tif -o res_m strlen_m_ref.tif")
#osl_substr
command += testshade("-t 1 -g 64 64 str_substr -od uint8 -o res sub_ref.tif -o res1 sub1_ref.tif -o res2 sub2_ref.tif"
" -o res_m sub_m_ref.tif -o res1_m sub1_m_ref.tif -o res2_m sub2_m_ref.tif")
outputs = [
"concat_ref.tif",
"concat_m_ref.tif",
"stoi_ref.tif",
"stoi_m_ref.tif",
"endswith_t_ref.tif",
"endswith_f_ref.tif",
"endswith_t_m_ref.tif",
"endswith_f_m_ref.tif",
"getchar_t1_ref.tif",
"getchar_t2_ref.tif",
"getchar_f1_ref.tif",
"getchar_f2_ref.tif",
"getchar_t1_m_ref.tif",
"getchar_t2_m_ref.tif",
"getchar_f1_m_ref.tif",
"getchar_f2_m_ref.tif",
"hash_ref.tif",
"hash_m_ref.tif",
"startswith_t_ref.tif",
"startswith_f_ref.tif",
"startswith_t_m_ref.tif",
"startswith_f_m_ref.tif",
"stof_ref.tif",
"stof_m_ref.tif",
"strlen_ref.tif",
"strlen_m_ref.tif",
"sub_ref.tif",
"sub1_ref.tif",
"sub2_ref.tif",
"sub_m_ref.tif",
"sub1_m_ref.tif",
"sub2_m_ref.tif",
]
# expect a few LSB failures
failthresh = 0.008
failpercent = 3
| lgritz/OpenShadingLanguage | testsuite/string-reg/run.py | Python | bsd-3-clause | 2,776 |
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from iotronic.common import context as iotronic_context
from iotronic.common import exception
__all__ = [
'init',
'cleanup',
'set_defaults',
'add_extra_exmods',
'clear_extra_exmods',
'get_allowed_exmods',
'RequestContextSerializer',
'get_client',
'get_server',
'get_notifier',
'TRANSPORT_ALIASES',
]
CONF = cfg.CONF
TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
exception.__name__,
]
EXTRA_EXMODS = []
# NOTE(lucasagomes): The iotronic.openstack.common.rpc entries are for
# backwards compat with IceHouse rpc_backend configuration values.
TRANSPORT_ALIASES = {
'iotronic.openstack.common.rpc.impl_kombu': 'rabbit',
'iotronic.openstack.common.rpc.impl_qpid': 'qpid',
'iotronic.openstack.common.rpc.impl_zmq': 'zmq',
'iotronic.rpc.impl_kombu': 'rabbit',
'iotronic.rpc.impl_qpid': 'qpid',
'iotronic.rpc.impl_zmq': 'zmq',
}
def init(conf):
global TRANSPORT, NOTIFIER
exmods = get_allowed_exmods()
TRANSPORT = messaging.get_transport(conf,
allowed_remote_exmods=exmods,
aliases=TRANSPORT_ALIASES)
serializer = RequestContextSerializer(JsonPayloadSerializer())
NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
def cleanup():
global TRANSPORT, NOTIFIER
assert TRANSPORT is not None
assert NOTIFIER is not None
TRANSPORT.cleanup()
TRANSPORT = NOTIFIER = None
def set_defaults(control_exchange):
messaging.set_transport_defaults(control_exchange)
def add_extra_exmods(*args):
EXTRA_EXMODS.extend(args)
def clear_extra_exmods():
del EXTRA_EXMODS[:]
def get_allowed_exmods():
return ALLOWED_EXMODS + EXTRA_EXMODS
class JsonPayloadSerializer(messaging.NoOpSerializer):
@staticmethod
def serialize_entity(context, entity):
return jsonutils.to_primitive(entity, convert_instances=True)
class RequestContextSerializer(messaging.Serializer):
def __init__(self, base):
self._base = base
def serialize_entity(self, context, entity):
if not self._base:
return entity
return self._base.serialize_entity(context, entity)
def deserialize_entity(self, context, entity):
if not self._base:
return entity
return self._base.deserialize_entity(context, entity)
def serialize_context(self, context):
return context.to_dict()
def deserialize_context(self, context):
return iotronic_context.RequestContext.from_dict(context)
def get_transport_url(url_str=None):
return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES)
def get_client(target, version_cap=None, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return messaging.RPCClient(TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer)
def get_server(target, endpoints, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return messaging.get_rpc_server(TRANSPORT,
target,
endpoints,
executor='eventlet',
serializer=serializer)
def get_notifier(service=None, host=None, publisher_id=None):
assert NOTIFIER is not None
if not publisher_id:
publisher_id = "%s.%s" % (service, host or CONF.host)
return NOTIFIER.prepare(publisher_id=publisher_id)
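# typical lifecycle (sketch): call init(conf) once at service startup, obtain
# clients/servers/notifiers via get_client()/get_server()/get_notifier() while
# running, and call cleanup() on shutdown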
| MDSLab/s4t-iotronic | iotronic/common/rpc.py | Python | apache-2.0 | 4,386 |
__author__ = 'dengzhihong'
from src.Regression.base import *
from scipy import optimize
class LASSO(RegressionBase):
@staticmethod
def run(sampx, sampy, K):
y = RegressionBase.strlistToFloatvector(sampy)
fai_matrix = RegressionBase.constructFaiMartix(sampx, K)
product_fai = np.dot(fai_matrix, np.transpose(fai_matrix))
n = len(sampx)
D = K + 1
Lambda = 0.18
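        # Standard LASSO-to-QP trick: split theta into non-negative parts,
        # theta = theta_plus - theta_minus, so the L1 penalty becomes linear.
        # Minimizing ||y - Phi^T theta||^2 + Lambda*||theta||_1 is then the QP
        # min 0.5*x^T H x + f^T x over x = [theta_plus; theta_minus] >= 0.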
H_matrix = np.array(np.row_stack( (np.column_stack( (product_fai,-product_fai) ), np.column_stack( (-product_fai,product_fai) )) ))
f_matrix = np.array(np.row_stack( (np.dot(fai_matrix,y), - np.dot(fai_matrix, y) ) ))
f_matrix = -f_matrix + Lambda
x_matrix = np.array(np.row_stack( (np.ones( (D,1) ), np.ones((D,1)) ) ))
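        # SLSQP treats an 'ineq' constraint as fun(x) >= 0, so returning x
        # enforces elementwise non-negativity of theta_plus and theta_minus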
        def constraintFunc(x):
            return x
        def objFunc(x):
            result = np.dot(np.dot(np.transpose(x), H_matrix), x)/2 + np.dot(np.transpose(f_matrix), x)
            return float(result)
con = ({'type': 'ineq', 'fun': constraintFunc})
res = optimize.minimize(objFunc, x_matrix, method='SLSQP', constraints=con)
theta = []
for i in range(res.x.shape[0]/2):
theta.append(res.x[i] - res.x[i+res.x.shape[0]/2])
return theta | dzh123xt/pythonML | src/Regression/lasso.py | Python | mit | 1,462 |
import unittest
import numpy as np
from .. import DataSet
from ..nodes import BaseNode
class TestBaseNode(unittest.TestCase):
def setUp(self):
data = np.random.rand(4, 10)
labels = np.ones((2, 10))
self.d = DataSet(data, labels)
self.n = BaseNode()
def test_existing_methods(self):
        '''Test that masking an existing method raises an exception'''
class MaskTrain(BaseNode):
def train(self, d):
pass
class MaskApply(BaseNode):
def apply(self, d):
pass
self.assertRaises(Exception, MaskTrain)
self.assertRaises(Exception, MaskApply)
def test_compatible_train_test(self):
d = self.d
n = self.n
n.train(d)
n.apply(d) # no exception
n.train(d)
self.assertRaises(ValueError, n.apply,
DataSet(data=self.d.data.reshape(2, 2, -1), default=d))
def test_logger_name(self):
class TestNode(BaseNode):
pass
n = BaseNode()
self.assertEqual(n.log.name, 'psychic.nodes.BaseNode')
tn = TestNode()
self.assertEqual(tn.log.name, 'psychic.nodes.TestNode')
def test_train_apply(self):
class TestNode(BaseNode):
def train_(self, d):
self.a = 5
def apply_(self, d):
return DataSet(data=d.data * self.a, default=d)
n = TestNode()
d = self.d
self.assertEqual(n.train(d).apply(d), n.train_apply(d))
self.assertEqual(n.train_apply(d), n.train_apply(d,d))
| wmvanvliet/psychic | psychic/tests/testbasenode.py | Python | bsd-3-clause | 1,594 |
#!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi,log10,max,min,cos,isnan, meshgrid,sqrt,abs
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import pyPLUTO as pp
import string
import time
from Tkinter import *
import sys
import os
class App:
def __init__(self,master):
# create toplevel window
frame = Frame(master)
frame.grid(ipadx=10,ipady=10)
try:
sys.argv[1]
except:
self.datatype = None
else:
self.datatype = sys.argv[1].split('--')[1]
if self.datatype == 'hdf5':
print "GUI currently doesnot support pyPLUTO AMR Reader!!"
sys.exit()
self.I = pp.Image()
self.Tool = pp.Tools()
self.lb1=Label(frame, text="Nstep").grid(row=0,column=0)
self.enstep = Entry(frame,width=8)
self.enstep.grid(row=0,column=1)
self.enstep.insert(0, "0")
self.LoadedNstep = StringVar()
self.PresentTime = StringVar()
self.myData = self.loaddata()
self.varkeys = self.myData.vars
self.wdir = self.myData.wdir
if self.myData.n3 != 1:
self.Geom = '3D'
elif self.myData.n3 == 1 and self.myData.n2 != 1:
self.Geom = '2D'
else:
self.Geom = '1D'
self.ldatabutton=Button(frame,text="Load data",command=self.loaddata)
self.ldatabutton.grid(row=0,column=2)
############### MARK THE CUTS #################################
self.ex1 = Entry(frame,width=5)
self.ex1.grid(row=2,column=0)
self.ex1.insert(0, "x1")
self.ex2 = Entry(frame,width=5)
self.ex2.grid(row=2,column=1)
self.ex2.insert(0, "x2")
self.ex3 = Entry(frame,width=5)
self.ex3.grid(row=2,column=2)
self.ex3.insert(0, "x3")
if self.Geom == '2D':
self.ex3.config(state='disabled')
if self.Geom == '1D':
self.ex3.config(state='disabled')
self.ex2.config(state='disabled')
self.ex1.config(state='disabled')
        # embed the matplotlib figure canvas in the Tk window
self.f = Figure(figsize=(7,7), dpi=100)
self.a = self.f.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.f, master=root)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0,column=3,columnspan=10,rowspan=10,sticky=E)
#self.toolbar = NavigationToolbar2TkAgg(self.canvas,tl)
#self.toolbar.update()
#self.canvas._tkcanvas.grid(row=60,column=15,sticky=E)
self.v = StringVar()
self.v.set("None")
################ VARIABLES TO PLOT #################################
for i in ['bx1s', 'bx2s', 'bx3s']:
try:
self.varkeys.remove(i)
except ValueError:
pass
for j in range(len(self.varkeys)):
self.ldata = Radiobutton(frame,text=self.varkeys[j],variable=self.v,value=self.varkeys[j],command=self.getmyvar)
self.ldata.grid(row=3+j,column=0,sticky=W)
################ SLICES CHOICE #################################
self.slvar = StringVar()
self.slvar.set("Choose Slice")
if self.Geom == '3D' :
SliceList = ("Along x1","Along x2","Along x3","Along x1-x2","Along x2-x3","Along x3-x1")
elif self.Geom == '2D' :
SliceList = ("Along x1", "Along x2", "Along x1-x2")
else:
SliceList = ()
for j in range(len(SliceList)):
self.sldata = Radiobutton(frame,text=SliceList[j],variable=self.slvar,value=SliceList[j],command=self.setslice)
self.sldata.grid(row=3+j,column=1,sticky=W)
############### PLOT PROPERTIES #################################
self.logvar = IntVar()
self.chkb = Checkbutton(frame,text="Log ",variable=self.logvar,onvalue=1,offvalue=0,command=self.logchkcall)
self.chkb.grid(row=3,column=2,sticky=W)#(row=15,column=0,sticky=W)
self.polarvar = IntVar()
self.polchkb = Checkbutton(frame,text="Polar",variable=self.polarvar,onvalue=1,offvalue=0,command=self.polchkcall)
self.polchkb.grid(row=4,column=2,sticky=W)#(row=15,column=1)
if self.Geom == '1D':
self.polchkb.config(state='disabled')
self.polarvar.set(0)
self.preaspect = IntVar()
self.aspectb = Checkbutton(frame,text="Aspect",variable=self.preaspect,onvalue=1,offvalue=0,command=self.aspchkcall)
self.aspectb.grid(row=5,column=2,sticky=W)#(row=15,column=2)
if self.Geom == '1D':
self.aspectb.config(state='disabled')
################ X and Y LABELS #################################
self.lb2=Label(frame,text="Labels").grid(row=22,column=0)
self.xlb = Entry(frame,width=15)
self.xlb.grid(row=22,column=1)
self.xlb.insert(0, "xlabel")
self.ylb = Entry(frame,width=15)
self.ylb.grid(row=22,column=2)
self.ylb.insert(0, "ylabel")
############### X and Y RANGE#######################
self.lb2a=Label(frame,text="XRange").grid(row=24,column=0)
self.lb2b=Label(frame,text="YRange").grid(row=26,column=0)
self.lb2c=Label(frame,text="VarRange").grid(row=28,column=0)
self.xrmin = Entry(frame,width=15)
self.xrmin.grid(row=24,column=1)
self.xrmin.insert(0,'')
self.xrmax = Entry(frame,width=15)
self.xrmax.grid(row=24,column=2)
self.xrmax.insert(0,'')
self.yrmin = Entry(frame,width=15)
self.yrmin.grid(row=26,column=1)
self.yrmin.insert(0,'')
self.yrmax = Entry(frame,width=15)
self.yrmax.grid(row=26,column=2)
self.yrmax.insert(0,'')
self.varmin = Entry(frame,width=15)
self.varmin.grid(row=28,column=1)
self.varmin.insert(0,'')
self.varmax = Entry(frame,width=15)
self.varmax.grid(row=28,column=2)
self.varmax.insert(0,'')
if self.Geom == '1D':
self.yrmin.config(state='disabled')
self.yrmax.config(state='disabled')
################ CONTOURS #################################
self.lb3=Label(frame,text="Contours").grid(row=16,column=0)
self.contvar = IntVar()
self.chkb = Checkbutton(frame,text="Contour",variable=self.contvar,onvalue=1,offvalue=0,command=self.contchkcall)
self.chkb.grid(row=6,column=2,sticky=W)#(row=16,column=0,sticky=W)
self.plcont = StringVar()
self.contkeys = ["None"]
if "bx3" in self.varkeys:
for item in self.varkeys:
self.contkeys.append(item)
self.contkeys.append("x1*bx3")
if "Ax3" in self.varkeys:
self.contkeys.append("x1*Ax3")
else:
for item in self.varkeys:
self.contkeys.append(item)
self.plcont.set("None")
self.contmenu = OptionMenu(frame, self.plcont,*self.contkeys)
self.contmenu.grid(row=16,column=1)
self.xlevb = Entry(frame,width=15)
self.xlevb.grid(row=16,column=2,sticky=W)
self.xlevb.insert(0, "Levels")
self.xlevb.config(state='disabled')
self.contmenu.config(state='disabled')
if self.Geom == '1D':
self.chkb.config(state = 'disabled')
################ ARROWS #################################
self.lb4=Label(frame,text="Arrows").grid(row=19,column=0)
self.arrowvar = IntVar()
self.arrowchkb = Checkbutton(frame,text="Arrows",variable=self.arrowvar,onvalue=1,offvalue=0,command=self.arrchkcall)
self.arrowchkb.grid(row=7,column=2,sticky=W)#(row=16,column=0,sticky=W)
self.arrspb = Entry(frame,width=15)
self.arrspb.grid(row=19,column=2,sticky=W)
self.arrspb.insert(0, "20")
self.plarr = StringVar()
self.arrkeys = ["None"]
self.arrkeys.append("Vp")
self.arrkeys.append("Vp_norm")
if "bx1" in self.varkeys:
self.arrkeys.append("Bp")
self.arrkeys.append("Bp_norm")
self.plarr.set("None")
self.arrmenu = OptionMenu(frame,self.plarr,*self.arrkeys)
self.arrmenu.grid(row=19,column=1)
self.arrmenu.config(state='disabled')
self.arrspb.config(state='disabled')
if self.Geom == '1D':
self.arrowchkb.config(state = 'disabled')
################ VARIOUS PLOTTING BUTTONS #################################
self.pltbutton=Button(frame,text="Plot",command=self.plotfinal)
self.pltbutton.grid(row=36,column=0)
if self.Geom == '1D':
self.pltbutton.config(state='active')
else:
self.pltbutton.config(state='disabled')
self.surfbutton=Button(frame,text="Surface",command=self.plotsurface)
self.surfbutton.grid(row=36,column=1)
self.surfbutton.config(state='disabled')
#if self.Geom == '1D':
# self.surfbutton.config(state='disabled')
self.clrbutton=Button(frame,text="Clear",command=self.plotclear)
self.clrbutton.grid(row=36,column=2)
################ INFORMATION #################################
self.lbinf0 = Label(frame,text="Information",font=("Times",12,"bold"))
self.lbinf0.grid(row=47,column=0,sticky=W,columnspan=3)
self.lbinf1a = Label(frame,text="Dir :",font=("Times",10,"bold")).grid(row=49,column=0,sticky=W,columnspan=3)
self.lbinf1 = Label(frame,text=self.wdir).grid(row=50,column=0,sticky=W,columnspan=3)
self.lbinf2a = Label(frame,text="Domain :",font=("Times",10,"bold")).grid(row=51,column=0,sticky=W,columnspan=3)
self.lbinf2 = Label(frame,text="n1 x n2 x n3 = %d x %d x %d " % (self.myData.n1,self.myData.n2,self.myData.n3)).grid(row=52,column=0,sticky=W,columnspan=3)
self.lbinf3a = Label(frame,text="Time Status",font=("Times",10,"bold")).grid(row=53,column=0,sticky=W,columnspan=3)
self.lbinf4 = Label(frame,text="Nlast = %d"% pp.nlast_info(w_dir=self.wdir,datatype=self.datatype)['nlast']).grid(row=54,column=0,sticky=W,columnspan=3)
self.lbinf5 = Label(frame,textvariable = self.LoadedNstep).grid(row=55,column=0,sticky=W,columnspan=3)
self.lbinf6 = Label(frame,textvariable = self.PresentTime).grid(row=56,column=0,sticky=W,columnspan=3)
################ VARIOUS FUNCTIONS #################################
def loaddata(self):
try:
int(self.enstep.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the proper value of Nstep"
else:
mynstep=int(self.enstep.get())
self.D = pp.pload(mynstep,datatype=self.datatype)
self.LoadedNstep.set("Loaded Nstep = "+self.enstep.get())
self.PresentTime.set("Present Time = "+str(self.D.SimTime) + " [cu]")
return self.D
def getmyvar(self):
try:
self.v.get() != "None"
except KeyError:
print "Specify the variable to plot"
else:
self.myvar=self.v.get()
def logchkcall(self):
self.logchk = self.logvar.get()
def contchkcall(self):
self.contchk = self.contvar.get()
if self.contchk == 1:
self.contmenu.config(state='normal')
self.xlevb.config(state='normal')
else:
self.contmenu.config(state='disabled')
self.xlevb.config(state='disabled')
def arrchkcall(self):
self.arrchk = self.arrowvar.get()
if self.arrchk == 1:
self.arrmenu.config(state='normal')
self.arrspb.config(state='normal')
else:
self.arrmenu.config(state='disabled')
self.arrspb.config(state='disabled')
def aspchkcall(self):
self.aspchk=self.preaspect.get()
def polchkcall(self):
self.polchk = self.polarvar.get()
def setslice(self):
self.slicename=self.slvar.get()
if self.slicename == "Along x1" or self.slicename == "Along x2" or self.slicename == "Along x3":
self.surfbutton.config(state='disabled')
self.arrowchkb.config(state = 'disabled')
self.arrowvar.set(0)
self.chkb.config(state = 'disabled')
self.contvar.set(0)
self.pltbutton.config(state='active')
self.polchkb.config(state='disabled')
self.polarvar.set(0)
else:
self.pltbutton.config(state='disabled')
self.arrowchkb.config(state = 'normal')
self.chkb.config(state = 'normal')
self.surfbutton.config(state='active')
self.polchkb.config(state='normal')
if self.slicename == "Along x2-x3":
self.polchkb.config(state='disabled')
self.polarvar.set(0)
def plotclear(self):
self.f.clf()
self.a = self.f.add_subplot(111)
self.canvas.show()
def plotfinal(self):
if self.getplotvar() == True:
self.a.axis([self.getxaxisrange()[0],self.getxaxisrange()[1],self.getvarrange()[0],self.getvarrange()[1]])
self.a.plot(self.x,self.var)
self.a.set_aspect('auto')
self.a.set_xlabel(self.xlb.get())
self.a.set_ylabel(self.ylb.get())
self.canvas.show()
def plotsurface(self):
tdum = time.time()
self.plotclear()
if self.preaspect.get() == 1:
self.a.set_aspect('equal')
else:
self.a.set_aspect('auto')
if self.polarvar.get() == 1:
if self.drawpolar() == True:
self.a.axis([self.getxaxisrange()[0],self.getxaxisrange()[1],self.getyaxisrange()[0],self.getyaxisrange()[1]])
self.image = self.a.imshow(self.SphData[self.myvar], origin='lower',extent=self.extent, interpolation='nearest',cmap="jet", vmin=self.getvarrange()[0],vmax=self.getvarrange()[1])
self.f.colorbar(self.image)
else:
if self.getsurfvar() == True:
self.a.axis([self.getxaxisrange()[0],self.getxaxisrange()[1],self.getyaxisrange()[0],self.getyaxisrange()[1]])
self.image=self.a.pcolormesh(self.x,self.y,self.var,cmap='jet',vmin=self.getvarrange()[0],vmax=self.getvarrange()[1])
self.f.colorbar(self.image)
if self.contvar.get() == 1:
try:
self.plcont.get() != "None"
except KeyError:
print "Specify the variable for Contour"
else:
self.drawcontour()
self.contlevlist=[]
self.contlevstr = string.split(self.xlevb.get(),',')
try:
if self.contlevstr[0] == 'log':
self.flevel = self.contlevstr[1]
self.varcont = log10(self.varcont)
else:
self.flevel = self.contlevstr[0]
float(self.flevel)
self.contlevlist = [float(self.flevel)]
except:
self.contlevlist = 5
else:
for j in range(1,len(self.contlevstr)):
self.contlevlist.append(float(self.contlevstr[j]))
self.cs1 = self.a.contour(self.xcont,self.ycont,self.varcont,self.contlevlist,colors="w")
self.a.clabel(self.cs1,inline=True)
if self.arrowvar.get() == 1:
try:
self.plarr.get() != "None"
except KeyError:
print "Specify the variable for plotting the arrow"
else:
self.drawarrow()
self.a.quiver(self.xcong, self.ycong, self.xveccong, self.yveccong,color='w')
self.a.set_xlabel(self.xlb.get())
self.a.set_ylabel(self.ylb.get())
self.canvas.show()
def getvarrange(self):
try:
float(self.varmin.get())
except:
if self.polarvar.get() != 1:
self.varminval = min(self.var)
else:
self.varminval = min(self.SphData[self.myvar][self.isnotnan].flat)#self.minPl
else:
self.varminval = float(self.varmin.get())
try:
float(self.varmax.get())
except:
if self.polarvar.get() != 1:
self.varmaxval = max(self.var)
else:
self.varmaxval = max(self.SphData[self.myvar][self.isnotnan].flat)#self.maxPl
else:
self.varmaxval = float(self.varmax.get())
return [self.varminval,self.varmaxval]
def getxaxisrange(self):
try:
float(self.xrmin.get())
except:
if self.polarvar.get() != 1:
self.xminval = min(self.x)
else:
self.xminval = min(self.R.flat)
else:
self.xminval = float(self.xrmin.get())
try:
float(self.xrmax.get())
except:
if self.polarvar.get() != 1:
self.xmaxval = max(self.x)
else:
self.xmaxval = max(self.R.flat)
else:
self.xmaxval = float(self.xrmax.get())
return [self.xminval,self.xmaxval]
def getyaxisrange(self):
try:
float(self.yrmin.get())
except:
if self.polarvar.get() != 1:
self.yminval = min(self.y)
else:
self.yminval = min(self.Z.flat)
else:
self.yminval = float(self.yrmin.get())
try:
float(self.yrmax.get())
except:
if self.polarvar.get() != 1:
self.ymaxval = max(self.y)
else:
self.ymaxval = max(self.Z.flat)
else:
self.ymaxval = float(self.yrmax.get())
return [self.yminval,self.ymaxval]
def getplotvar(self):
self.sucess = False
if self.logvar.get() == 1:
self.var = log10(self.D.__getattribute__(self.myvar))
else:
self.var = self.D.__getattribute__(self.myvar)
if self.Geom == '1D':
self.x = self.D.x1
self.sucess = True
else:
if self.slicename == "Along x1":
self.x = self.D.x1
if self.D.n3 == 1:
try:
int(self.ex2.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x2 cut"
else:
self.var = self.var[:,int(self.ex2.get())]
self.sucess = True
else:
try:
int(self.ex2.get().strip().split()[0])
int(self.ex3.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x2 or x3 cut"
else:
self.var = self.var[:,int(self.ex2.get()),int(self.ex3.get())]
self.sucess = True
elif self.slicename == "Along x2":
self.x = self.D.x2
if self.D.n3 == 1:
try:
int(self.ex1.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x1 cut"
else:
self.var = self.var[int(self.ex1.get()),:]
self.sucess = True
else:
try:
int(self.ex1.get().strip().split()[0])
int(self.ex3.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x1 or x3 cut"
else:
self.var = self.var[int(self.ex1.get()),:,int(self.ex3.get())]
self.sucess = True
else:
self.x = self.D.x3
try:
int(self.ex1.get().strip().split()[0])
int(self.ex2.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x1 or x2 cut"
else:
self.var = self.var[int(self.ex1.get()),int(self.ex2.get()),:]
self.sucess = True
return self.sucess
def getsurfvar(self):
self.sucess = False
if self.logvar.get() == 1:
self.var = log10(self.D.__getattribute__(self.myvar))
else:
self.var = self.D.__getattribute__(self.myvar)
if self.slicename == "Along x1-x2":
self.x = self.D.x1
self.y = self.D.x2
xmineed = (abs(self.x-self.getxaxisrange()[0])).argmin()
xmaneed = (abs(self.x-self.getxaxisrange()[1])).argmin()
ymineed = (abs(self.y-self.getyaxisrange()[0])).argmin()
ymaneed = (abs(self.y-self.getyaxisrange()[1])).argmin()
self.x = self.x[xmineed:xmaneed]
self.y = self.y[ymineed:ymaneed]
if self.D.n3 == 1:
self.var = self.var[xmineed:xmaneed,ymineed:ymaneed].T
self.sucess = True
else:
try:
int(self.ex3.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x3 cut"
else:
self.var = self.var[xmineed:xmaneed,ymineed:ymaneed,int(self.ex3.get())].T
self.sucess = True
elif self.slicename == "Along x2-x3":
self.x = self.D.x2
self.y = self.D.x3
xmineed = (abs(self.x-self.getxaxisrange()[0])).argmin()
xmaneed = (abs(self.x-self.getxaxisrange()[1])).argmin()
ymineed = (abs(self.y-self.getyaxisrange()[0])).argmin()
ymaneed = (abs(self.y-self.getyaxisrange()[1])).argmin()
self.x = self.x[xmineed:xmaneed]
self.y = self.y[ymineed:ymaneed]
try:
int(self.ex1.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x1 cut"
else:
self.var = self.var[int(self.ex1.get()),xmineed:xmaneed,ymineed:ymaneed].T
self.sucess = True
else:
self.x = self.D.x1
self.y = self.D.x3
xmineed = (abs(self.x-self.getxaxisrange()[0])).argmin()
xmaneed = (abs(self.x-self.getxaxisrange()[1])).argmin()
ymineed = (abs(self.y-self.getyaxisrange()[0])).argmin()
ymaneed = (abs(self.y-self.getyaxisrange()[1])).argmin()
self.x = self.x[xmineed:xmaneed]
self.y = self.y[ymineed:ymaneed]
try:
int(self.ex2.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x2 cut"
else:
self.var = self.var[xmineed:xmaneed,int(self.ex2.get()),ymineed:ymaneed].T
self.sucess = True
return self.sucess
def drawpolar(self):
self.sucess = False
if self.slicename == "Along x1-x2":
if self.D.n3 == 1:
self.R,self.Z,self.SphData = self.I.getSphData(self.D,w_dir=self.wdir,datatype=self.datatype, rphi=False)
self.sucess = True
else:
try:
int(self.ex3.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x3 cut"
else:
self.R,self.Z,self.SphData = self.I.getSphData(self.D,w_dir=self.wdir,datatype=self.datatype, rphi=False,x3cut=int(self.ex3.get()))
self.sucess = True
if self.slicename == "Along x3-x1":
try:
int(self.ex2.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x2 cut"
else:
self.R,self.Z,self.SphData = self.I.getSphData(self.D,w_dir=self.wdir,datatype=self.datatype, rphi=True, x2cut=int(self.ex2.get()))
self.sucess = True
if self.sucess == True:
self.extent=(min(self.R.flat),max(self.R.flat),min(self.Z.flat),max(self.Z.flat))
self.dRR=max(self.R.flat)-min(self.R.flat)
self.dZZ=max(self.Z.flat)-min(self.Z.flat)
            self.isnotnan=~isnan(self.SphData[self.myvar])
self.maxPl=max(self.SphData[self.myvar][self.isnotnan].flat)
self.minPl=min(self.SphData[self.myvar][self.isnotnan].flat)
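            # if the variable changes sign, symmetrize the colour range about
            # zero and pin two corner cells to the extrema so imshow scales to it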
self.normrange=False
if self.minPl<0:
self.normrange=True
if self.maxPl>-self.minPl:
self.minPl=-self.maxPl
else:
self.maxPl=-self.minPl
if (self.normrange and self.myvar !='rho' and self.myvar !='prs'):
self.SphData[self.myvar][-1][-1]=self.maxPl
self.SphData[self.myvar][-1][-2]=self.minPl
if self.logvar.get() == 1:
self.SphData[self.myvar] = log10(self.SphData[self.myvar])
return self.sucess
def drawcontour(self):
if self.polarvar.get() != 1:
if self.slicename == "Along x1-x2":
self.xcont = self.D.x1
self.ycont = self.D.x2
self.Xmesh, self.Ymesh = meshgrid(self.D.x1.T,self.D.x2.T)
if self.D.n3 == 1:
if self.plcont.get() == 'x1*Ax3':
self.varcont = self.Xmesh*(self.D.Ax3.T)
elif self.plcont.get() == 'x1*bx3':
self.varcont = self.Xmesh*(self.D.bx3.T)
else:
self.varcont = self.D.__getattribute__(self.plcont.get())[:,:].T
else:
if self.plcont.get() == 'x1*Ax3':
self.varcont = self.Xmesh*(self.D.Ax3[:,:,int(self.ex3.get())].T)
elif self.plcont.get() == 'x1*bx3':
self.varcont = self.Xmesh*(self.D.bx3[:,:,int(self.ex3.get())].T)
else:
self.varcont = self.D.__getattribute__(self.plcont.get())[:,:,int(self.ex3.get())].T
elif self.slicename == "Along x2-x3":
self.xcont = self.D.x2
self.ycont = self.D.x3
self.varcont = self.D.__getattribute__(self.plcont.get())[int(self.ex1.get()),:,:].T
else:
self.xcont = self.D.x1
self.ycont = self.D.x3
self.varcont = self.D.__getattribute__(self.plcont.get())[:,int(self.ex2.get()),:].T
else:
self.xcont = self.R
self.ycont = self.Z
if self.plcont.get() == 'x1*Ax3':
self.varcont = self.R*(self.SphData['Ax3'])
elif self.plcont.get() == 'x1*bx3':
self.varcont = self.R*(self.SphData['bx3'])
else:
if self.logvar.get() == 1 and self.plcont.get() == self.myvar:
self.varcont = 10**(self.SphData[self.plcont.get()])
else:
self.varcont = self.SphData[self.plcont.get()]
def drawarrow(self):
if self.polarvar.get() != 1:
if self.slicename == "Along x1-x2":
self.Xmesh, self.Ymesh = meshgrid(self.D.x1.T,self.D.x2.T)
self.xcong = self.Tool.congrid(self.Xmesh,2*(int(self.arrspb.get()),),method='linear')
self.ycong = self.Tool.congrid(self.Ymesh,2*(int(self.arrspb.get()),),method='linear')
if self.plarr.get() == 'Vp' or self.plarr.get() =='Vp_norm':
if self.D.n3 == 1:
self.vel1 = self.D.vx1[:,:].T
self.vel2 = self.D.vx2[:,:].T
else:
self.vel1 = self.D.vx1[:,:,int(self.ex3.get())].T
self.vel2 = self.D.vx2[:,:,int(self.ex3.get())].T
self.xveccong = self.Tool.congrid(self.vel1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.vel2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Vp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
if self.plarr.get() == 'Bp' or self.plarr.get() =='Bp_norm':
if self.D.n3 == 1:
self.mag1 = self.D.bx1[:,:].T
self.mag2 = self.D.bx2[:,:].T
else:
self.mag1 = self.D.bx1[:,:,int(self.ex3.get())].T
self.mag2 = self.D.bx2[:,:,int(self.ex3.get())].T
self.xveccong = self.Tool.congrid(self.mag1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.mag2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Bp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
elif self.slicename == "Along x2-x3":
self.Xmesh, self.Ymesh = meshgrid(self.D.x2.T,self.D.x3.T)
self.xcong = self.Tool.congrid(self.Xmesh,2*(int(self.arrspb.get()),),method='linear')
self.ycong = self.Tool.congrid(self.Ymesh,2*(int(self.arrspb.get()),),method='linear')
if self.plarr.get() == 'Vp' or self.plarr.get() =='Vp_norm':
self.vel1 = self.D.vx2[int(self.ex1.get()),:,:].T
self.vel2 = self.D.vx3[int(self.ex1.get()),:,:].T
self.xveccong = self.Tool.congrid(self.vel1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.vel2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Vp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
if self.plarr.get() == 'Bp' or self.plarr.get() =='Bp_norm':
self.mag1 = self.D.bx2[int(self.ex1.get()),:,:].T
self.mag2 = self.D.bx3[int(self.ex1.get()),:,:].T
self.xveccong = self.Tool.congrid(self.mag1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.mag2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Bp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
else:
self.Xmesh, self.Ymesh = meshgrid(self.D.x1.T,self.D.x3.T)
self.xcong = self.Tool.congrid(self.Xmesh,2*(int(self.arrspb.get()),),method='linear')
self.ycong = self.Tool.congrid(self.Ymesh,2*(int(self.arrspb.get()),),method='linear')
if self.plarr.get() == 'Vp' or self.plarr.get() =='Vp_norm':
self.vel1 = self.D.vx1[:,int(self.ex2.get()),:].T
self.vel2 = self.D.vx3[:,int(self.ex2.get()),:].T
self.xveccong = self.Tool.congrid(self.vel1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.vel2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Vp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
if self.plarr.get() == 'Bp' or self.plarr.get() =='Bp_norm':
self.mag1 = self.D.bx1[:,int(self.ex2.get()),:].T
self.mag2 = self.D.bx3[:,int(self.ex2.get()),:].T
self.xveccong = self.Tool.congrid(self.mag1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.mag2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Bp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
else:
self.xcong = self.Tool.congrid(self.R,2*(int(self.arrspb.get()),),method='linear')
self.ycong = self.Tool.congrid(self.Z,2*(int(self.arrspb.get()),),method='linear')
if self.plarr.get() == 'Vp' or self.plarr.get() =='Vp_norm':
if self.slicename == "Along x1-x2":
self.vel1 = self.SphData['v1c']
self.vel2 = self.SphData['v2c']
else:
self.vel1 = self.SphData['v1c']
self.vel2 = self.SphData['v3c']
self.xveccong = self.Tool.congrid(self.vel1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.vel2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Vp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
if self.plarr.get() == 'Bp' or self.plarr.get() =='Bp_norm':
if self.slicename == "Along x1-x2":
self.mag1 = self.SphData['b1c']
self.mag2 = self.SphData['b2c']
else:
self.mag1 = self.SphData['b1c']
self.mag2 = self.SphData['b3c']
self.xveccong = self.Tool.congrid(self.mag1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.mag2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Bp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
def epssave(self):
self.f.savefig(self.myvar+'_'+self.enstep.get()+'.eps')
def pngsave(self):
self.f.savefig(self.myvar+'_'+self.enstep.get()+'.png')
def pdfsave(self):
self.f.savefig(self.myvar+'_'+self.enstep.get()+'.pdf')
def jpgsave(self):
self.f.savefig(self.myvar+'_'+self.enstep.get()+'.jpg')
root=Tk()
app=App(root)
root.title("pyPLUTO")
menubar = Menu(root)
savemenu = Menu(menubar,tearoff=0)
savemenu.add_command(label='EPS',command=app.epssave)
savemenu.add_command(label='PDF',command=app.pdfsave)
savemenu.add_command(label='PNG',command=app.pngsave)
savemenu.add_command(label='JPG',command=app.jpgsave)
menubar.add_cascade(label="Save As", menu=savemenu)
#menubar.add_command(label='Plot',command = app.plotfinal)
#menubar.add_command(label='Surface',command=app.plotsurface)
#menubar.add_command(label='Clear',command=app.plotclear)
menubar.add_command(label='Quit',command=root.quit)
root.config(menu=menubar)
root.mainloop()
| aywander/pluto-outflows | Tools/pyPLUTO/bin/GUI_pyPLUTO.py | Python | gpl-2.0 | 36,620 |
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen, ConfigList
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.config import config, ConfigSubsection, ConfigBoolean, getConfigListEntry, ConfigSelection, ConfigYesNo, ConfigIP
from Components.Network import iNetwork
from Components.Ipkg import IpkgComponent
from enigma import eDVBDB
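# wizard state flags, persisted in the enigma2 configuration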
config.misc.installwizard = ConfigSubsection()
config.misc.installwizard.hasnetwork = ConfigBoolean(default = False)
config.misc.installwizard.ipkgloaded = ConfigBoolean(default = False)
config.misc.installwizard.channellistdownloaded = ConfigBoolean(default = False)
class InstallWizard(Screen, ConfigListScreen):
STATE_UPDATE = 0
STATE_CHOISE_CHANNELLIST = 1
STATE_CHOISE_SOFTCAM = 2
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.index = args
self.list = []
ConfigListScreen.__init__(self, self.list)
if self.index == self.STATE_UPDATE:
config.misc.installwizard.hasnetwork.value = False
config.misc.installwizard.ipkgloaded.value = False
modes = {0: " "}
self.enabled = ConfigSelection(choices = modes, default = 0)
self.adapters = [(iNetwork.getFriendlyAdapterName(x),x) for x in iNetwork.getAdapterList()]
is_found = False
for x in self.adapters:
if x[1] == 'eth0' or x[1] == 'eth1':
if iNetwork.getAdapterAttribute(x[1], 'up'):
self.ipConfigEntry = ConfigIP(default = iNetwork.getAdapterAttribute(x[1], "ip"))
iNetwork.checkNetworkState(self.checkNetworkCB)
                        is_found = True
else:
iNetwork.restartNetwork(self.checkNetworkLinkCB)
break
if is_found is False:
self.createMenu()
elif self.index == self.STATE_CHOISE_CHANNELLIST:
self.enabled = ConfigYesNo(default = True)
modes = {"openxta": "XTA(13e-19e)", "19e": "Astra 1", "23e": "Astra 3", "19e-23e": "Astra 1 Astra 3", "19e-23e-28e": "Astra 1 Astra 2 Astra 3", "13e-19e-23e-28e": "Astra 1 Astra 2 Astra 3 Hotbird"}
self.channellist_type = ConfigSelection(choices = modes, default = "openxta")
self.createMenu()
elif self.index == self.STATE_CHOISE_SOFTCAM:
self.enabled = ConfigYesNo(default = True)
modes = {"cccam": _("default") + " (CCcam)", "scam": "scam"}
self.softcam_type = ConfigSelection(choices = modes, default = "cccam")
self.createMenu()
def checkNetworkCB(self, data):
if data < 3:
config.misc.installwizard.hasnetwork.value = True
self.createMenu()
def checkNetworkLinkCB(self, retval):
if retval:
iNetwork.checkNetworkState(self.checkNetworkCB)
else:
self.createMenu()
    def createMenu(self):
        if not hasattr(self, "index"):
            return
self.list = []
if self.index == self.STATE_UPDATE:
if config.misc.installwizard.hasnetwork.value:
self.list.append(getConfigListEntry(_("Your internet connection is working (ip: %s)") % (self.ipConfigEntry.getText()), self.enabled))
else:
self.list.append(getConfigListEntry(_("Your receiver does not have an internet connection"), self.enabled))
elif self.index == self.STATE_CHOISE_CHANNELLIST:
self.list.append(getConfigListEntry(_("Install channel list"), self.enabled))
if self.enabled.value:
self.list.append(getConfigListEntry(_("Channel list type"), self.channellist_type))
elif self.index == self.STATE_CHOISE_SOFTCAM:
self.list.append(getConfigListEntry(_("Install softcam"), self.enabled))
if self.enabled.value:
self.list.append(getConfigListEntry(_("Softcam type"), self.softcam_type))
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyLeft(self):
if self.index == 0:
return
ConfigListScreen.keyLeft(self)
self.createMenu()
def keyRight(self):
if self.index == 0:
return
ConfigListScreen.keyRight(self)
self.createMenu()
def run(self):
if self.index == self.STATE_UPDATE:
if config.misc.installwizard.hasnetwork.value:
self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (updating packages)'), IpkgComponent.CMD_UPDATE)
elif self.index == self.STATE_CHOISE_CHANNELLIST and self.enabled.value:
self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (downloading channel list)'), IpkgComponent.CMD_REMOVE, {'package': 'enigma2-plugin-settings-henksat-' + self.channellist_type.value})
elif self.index == self.STATE_CHOISE_SOFTCAM and self.enabled.value:
self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (downloading softcam)'), IpkgComponent.CMD_INSTALL, {'package': 'enigma2-plugin-softcams-' + self.softcam_type.value})
return
class InstallWizardIpkgUpdater(Screen):
skin = """
<screen position="c-300,c-25" size="600,50" title=" ">
<widget source="statusbar" render="Label" position="10,5" zPosition="10" size="e-10,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, index, info, cmd, pkg = None):
self.skin = InstallWizardIpkgUpdater.skin
Screen.__init__(self, session)
self["statusbar"] = StaticText(info)
self.pkg = pkg
self.index = index
self.state = 0
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
if self.index == InstallWizard.STATE_CHOISE_CHANNELLIST:
self.ipkg.startCmd(cmd, {'package': 'enigma2-plugin-settings-*'})
else:
self.ipkg.startCmd(cmd, pkg)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_DONE:
if self.index == InstallWizard.STATE_UPDATE:
config.misc.installwizard.ipkgloaded.value = True
elif self.index == InstallWizard.STATE_CHOISE_CHANNELLIST:
if self.state == 0:
self.ipkg.startCmd(IpkgComponent.CMD_INSTALL, self.pkg)
self.state = 1
return
else:
config.misc.installwizard.channellistdownloaded.value = True
eDVBDB.getInstance().reloadBouquets()
eDVBDB.getInstance().reloadServicelist()
self.close()
| XTAv2/Enigma2 | lib/python/Screens/InstallWizard.py | Python | gpl-2.0 | 5,974 |
from __future__ import with_statement
from commands import getoutput
from contextlib import closing
from email.mime.text import MIMEText
from email.utils import make_msgid
from genshi.template import NewTextTemplate as TextTemplate
import json
import logging
import os
import requests
import smtplib
from socket import gethostname
log = logging.getLogger(__name__)
# 64K should be enough for anyone.
MAX_OUTPUT_SIZE = 64 * 1024
default_recipient = os.environ.get('PROCCER_DEFAULT_NOTIFY')
mail_from = os.environ.get('PROCCER_MAIL_FROM', 'proccer@localhost')
mail_reply_to = os.environ.get('PROCCER_REPLY_TO')
smtp_host = os.environ.get('SMTP_HOST', 'localhost')
disable_email = 'PROCCER_DISABLE_EMAIL' in os.environ
web_url = os.environ.get('PROCCER_WEB_URL', '').strip('/')
default_api_url = 'https://slack.com/services/hooks/incoming-webhook'
slack_api_url = os.environ.get('SLACK_API_URL', default_api_url)
slack_api_token = os.environ.get('SLACK_API_TOKEN')
slack_channel = os.environ.get('SLACK_CHANNEL', '#general')
slack_post_timeout = 10
def state_change_notification(job, result):
msg, rcpt = mail_for_state(job, job.state, result)
if msg:
send_mail(msg, rcpt)
notify_slack(job, job.state)
def repeat_notification(job):
job_result = job.results.first()
result = {
'output': job_result.output if job_result else '',
'config': {},
}
msg, rcpt = mail_for_state(job, 'still ' + job.state, result)
if msg:
send_mail(msg, rcpt)
notify_slack(job, 'still ' + job.state)
def notify_slack(job, state):
if not slack_api_token:
return
url = '%s/job/%d/' % (web_url, job.id)
color = slack_colors.get(state.replace('still ', ''), 'warning')
text = '<%s|%s> %s' % (url, unicode(job), state)
payload = {
'channel': slack_channel,
'username': 'proccer',
'icon_emoji': ':penguin:',
'attachments': [
{
'color': color,
'text': text,
'fallback': text,
},
],
}
response = requests.post(
slack_api_url,
params={'token': slack_api_token},
data={'payload': json.dumps(payload)},
timeout=slack_post_timeout
)
response.raise_for_status()
slack_colors = {
'ok': 'good',
'late': 'warning',
'error': 'danger', # Will Robinson!
}
def mail_for_state(job, state, result):
if not (job.notify or default_recipient):
log.debug('nobody to notify for job %r state-change', job.id)
return None, None
rcpt = job.notify or [default_recipient]
tag = '[%s]' % unicode(job).replace(' ', '')
subject = '%s %s' % (tag, state)
values = {
'url': web_url,
'getoutput': getoutput,
'job': job,
'state': state,
}
if result:
values['output'] = result['output']
values['config'] = result['config']
values['output_truncated'] = len(values['output']) > MAX_OUTPUT_SIZE
if values['output_truncated']:
values['output'] = values['output'][:MAX_OUTPUT_SIZE]
body = body_template.generate(**values).render('text')
msg = MIMEText(body, 'plain', 'utf-8')
msg['Message-ID'] = make_msgid(gethostname())
msg['Subject'] = subject
msg['From'] = mail_from
if mail_reply_to:
msg['Reply-To'] = mail_reply_to
msg['To'] = ', '.join(rcpt)
log.info('sending %s notification for job %r to %r message-id %s',
state, job.id, rcpt, msg['Message-ID'])
return msg, rcpt
def send_mail(msg, rcpt):
if disable_email:
return
env_rcpt = rcpt
env_from = 'proccer@' + gethostname()
with closing(smtplib.SMTP(smtp_host)) as smtp:
smtp.sendmail(env_from, env_rcpt, msg.as_string())
body_template = TextTemplate('''\
{% if state == 'ok' %}${getoutput('cowsay "Job okay."')}{% end %}\
{% if state == 'error' %}${getoutput('cowsay -e OO "JOB FAILED!"')}{% end %}\
{% if state not in ['ok', 'error'] %}\
${getoutput('cowsay -e Oo "Job %s"' % state)}\
{% end %}
Job: ${job}
{% if url %}
URL: ${url}/job/${job.id}/
{% end %}\
Last seen: ${job.last_seen}
{% if defined('config') %}\
Command: ${config.get('command', '??')}
{% end %}\
{% if defined('output') %}\
Output:
${'\\n'.join(' ' + line for line in output.split('\\n'))}
{% if output_truncated %}\
Output truncated.
{% end %}\
{% end %}\
''')
| CSIS/proccer | src/proccer/notifications.py | Python | mit | 4,433 |
if __name__ == '__main__':
a = int(raw_input())
b = int(raw_input())
print a + b
print a - b
print a * b
| LuisUrrutia/hackerrank | python/introduction/python-arithmetic-operators.py | Python | mit | 126 |
#-*-PYTHON-*-
import functools
# https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
        try:
            hash(args)
        except TypeError:
            # uncacheable arguments, e.g. a tuple containing a list;
            # better to not cache than blow up.
            return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
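# --- Illustrative usage (added sketch; ``fib`` is a hypothetical example
# function, not part of the original module) ---
if __name__ == '__main__':
    @memoized
    def fib(n):
        '''Return the n-th Fibonacci number.'''
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    # The first call populates the cache; repeated calls are dict lookups.
    print(fib(30))  # 832040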
| leighklotz/traffic-map | pricing/memoizer.py | Python | gpl-2.0 | 1,030 |
"""
Wrapper for k-means clustering that takes care of reshaping and generating labels.
"""
from __future__ import division
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import project_config
import sklearn.cluster
def perform_kMeans_clustering_analysis(feature_data, n_clusters):
"""
    Cluster voxel time courses into n_clusters based on euclidean distances
    between them. It treats each processed BOLD image of a time course as
    a separate feature, i.e. feature_data.shape[-1] features in total.
    Parameters
    ----------
    feature_data : array whose first three dimensions form the 3D volume;
        the last dimension holds the feature values.
    n_clusters : number of clusters to segregate the time courses into.
    Returns
    -------
    labels : array of shape feature_data.shape[:-1]; each element is the
        cluster label of the corresponding voxel.
"""
kMeans = sklearn.cluster.KMeans(n_clusters)
feature_data_2d = feature_data.reshape((-1,feature_data.shape[-1]))
labels = kMeans.fit_predict(feature_data_2d)
    return labels.reshape(feature_data.shape[:-1])
| nhejazi/project-gamma | code/utils/kmeans.py | Python | bsd-3-clause | 1,066 |
from pywink.devices.base import WinkDevice
SENSOR_FIELDS_TO_UNITS = {"humidity": "%", "temperature": u'\N{DEGREE SIGN}', "brightness": "%", "proximity": ""}
class WinkSensor(WinkDevice):
"""
Represents a Wink sensor.
"""
def __init__(self, device_state_as_json, api_interface, sensor_type_info):
super(WinkSensor, self).__init__(device_state_as_json, api_interface)
self.sensor_type_info = sensor_type_info
def unit(self):
return SENSOR_FIELDS_TO_UNITS.get(self.capability(), None)
def unit_type(self):
return self.sensor_type_info.get("type")
def capability(self):
return self.sensor_type_info.get("field")
def tamper_detected(self):
tamper = self._last_reading.get('tamper_detected', False)
# If tamper was never detected it is set to None, not False
if tamper is None:
tamper = False
return tamper
def name(self):
return self.json_state.get("name") + " " + self.capability()
def state(self):
return self._last_reading.get(self.capability())
def pubnub_update(self, json_response):
humidity = json_response['last_reading'].get("humidity")
# humidity is returned from pubnub on some sensors as a float
if humidity is not None:
if humidity < 1.0:
json_response["last_reading"]["humidity"] = humidity * 100
self.json_state = json_response
| Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/pywink/devices/sensor.py | Python | gpl-2.0 | 1,453 |
import numpy as np
import theano as theano
import theano.tensor as T
from theano.gradient import grad_clip
import time
import operator
class GRUTheano:
def __init__(self, word_dim, hidden_dim=128, bptt_truncate=-1):
# Assign instance variables
self.word_dim = word_dim
self.hidden_dim = hidden_dim
self.bptt_truncate = bptt_truncate
# Initialize the network parameters
E = np.random.uniform(-np.sqrt(1./word_dim), np.sqrt(1./word_dim), (hidden_dim, word_dim))
U = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (6, hidden_dim, hidden_dim))
W = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (6, hidden_dim, hidden_dim))
V = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (word_dim, hidden_dim))
b = np.zeros((6, hidden_dim))
c = np.zeros(word_dim)
        # Theano: create shared variables
self.E = theano.shared(name='E', value=E.astype(theano.config.floatX))
self.U = theano.shared(name='U', value=U.astype(theano.config.floatX))
self.W = theano.shared(name='W', value=W.astype(theano.config.floatX))
self.V = theano.shared(name='V', value=V.astype(theano.config.floatX))
self.b = theano.shared(name='b', value=b.astype(theano.config.floatX))
self.c = theano.shared(name='c', value=c.astype(theano.config.floatX))
# SGD / rmsprop: Initialize parameters
self.mE = theano.shared(name='mE', value=np.zeros(E.shape).astype(theano.config.floatX))
self.mU = theano.shared(name='mU', value=np.zeros(U.shape).astype(theano.config.floatX))
self.mV = theano.shared(name='mV', value=np.zeros(V.shape).astype(theano.config.floatX))
self.mW = theano.shared(name='mW', value=np.zeros(W.shape).astype(theano.config.floatX))
self.mb = theano.shared(name='mb', value=np.zeros(b.shape).astype(theano.config.floatX))
self.mc = theano.shared(name='mc', value=np.zeros(c.shape).astype(theano.config.floatX))
# We store the Theano graph here
self.theano = {}
self.__theano_build__()
def __theano_build__(self):
E, V, U, W, b, c = self.E, self.V, self.U, self.W, self.b, self.c
x = T.ivector('x')
y = T.ivector('y')
def forward_prop_step(x_t, s_t1_prev, s_t2_prev):
# This is how we calculated the hidden state in a simple RNN. No longer!
# s_t = T.tanh(U[:,x_t] + W.dot(s_t1_prev))
# Word embedding layer
x_e = E[:,x_t]
# GRU Layer 1
z_t1 = T.nnet.hard_sigmoid(U[0].dot(x_e) + W[0].dot(s_t1_prev) + b[0])
r_t1 = T.nnet.hard_sigmoid(U[1].dot(x_e) + W[1].dot(s_t1_prev) + b[1])
c_t1 = T.tanh(U[2].dot(x_e) + W[2].dot(s_t1_prev * r_t1) + b[2])
s_t1 = (T.ones_like(z_t1) - z_t1) * c_t1 + z_t1 * s_t1_prev
# GRU Layer 2
z_t2 = T.nnet.hard_sigmoid(U[3].dot(s_t1) + W[3].dot(s_t2_prev) + b[3])
r_t2 = T.nnet.hard_sigmoid(U[4].dot(s_t1) + W[4].dot(s_t2_prev) + b[4])
c_t2 = T.tanh(U[5].dot(s_t1) + W[5].dot(s_t2_prev * r_t2) + b[5])
s_t2 = (T.ones_like(z_t2) - z_t2) * c_t2 + z_t2 * s_t2_prev
# Final output calculation
# Theano's softmax returns a matrix with one row, we only need the row
o_t = T.nnet.softmax(V.dot(s_t2) + c)[0]
return [o_t, s_t1, s_t2]
[o, s, s2], updates = theano.scan(
forward_prop_step,
sequences=x,
truncate_gradient=self.bptt_truncate,
outputs_info=[None,
dict(initial=T.zeros(self.hidden_dim)),
dict(initial=T.zeros(self.hidden_dim))])
prediction = T.argmax(o, axis=1)
o_error = T.sum(T.nnet.categorical_crossentropy(o, y))
# Total cost (could add regularization here)
cost = o_error
# Gradients
dE = T.grad(cost, E)
dU = T.grad(cost, U)
dW = T.grad(cost, W)
db = T.grad(cost, b)
dV = T.grad(cost, V)
dc = T.grad(cost, c)
# Assign functions
self.predict = theano.function([x], o)
self.predict_class = theano.function([x], prediction)
self.ce_error = theano.function([x, y], cost)
self.bptt = theano.function([x, y], [dE, dU, dW, db, dV, dc])
# SGD parameters
learning_rate = T.scalar('learning_rate')
decay = T.scalar('decay')
# rmsprop cache updates
mE = decay * self.mE + (1 - decay) * dE ** 2
mU = decay * self.mU + (1 - decay) * dU ** 2
mW = decay * self.mW + (1 - decay) * dW ** 2
mV = decay * self.mV + (1 - decay) * dV ** 2
mb = decay * self.mb + (1 - decay) * db ** 2
mc = decay * self.mc + (1 - decay) * dc ** 2
self.sgd_step = theano.function(
[x, y, learning_rate, theano.Param(decay, default=0.9)],
[],
updates=[(E, E - learning_rate * dE / T.sqrt(mE + 1e-6)),
(U, U - learning_rate * dU / T.sqrt(mU + 1e-6)),
(W, W - learning_rate * dW / T.sqrt(mW + 1e-6)),
(V, V - learning_rate * dV / T.sqrt(mV + 1e-6)),
(b, b - learning_rate * db / T.sqrt(mb + 1e-6)),
(c, c - learning_rate * dc / T.sqrt(mc + 1e-6)),
(self.mE, mE),
(self.mU, mU),
(self.mW, mW),
(self.mV, mV),
(self.mb, mb),
(self.mc, mc)
])
def calculate_total_loss(self, X, Y):
return np.sum([self.ce_error(x,y) for x,y in zip(X,Y)])
def calculate_loss(self, X, Y):
# Divide calculate_loss by the number of words
num_words = np.sum([len(y) for y in Y])
return self.calculate_total_loss(X,Y)/float(num_words)
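# --- Illustrative usage (added sketch, not part of the original module; the
# toy dimensions and index sequences below are hypothetical) ---
if __name__ == '__main__':
    np.random.seed(10)
    model = GRUTheano(word_dim=100, hidden_dim=16)
    x_example = [0, 1, 2, 3]   # input word indices
    y_example = [1, 2, 3, 4]   # target word indices, shifted by one
    print("loss before:", model.calculate_loss([x_example], [y_example]))
    model.sgd_step(x_example, y_example, 0.005)
    print("loss after: ", model.calculate_loss([x_example], [y_example]))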
| ctogle/nnets | src/nnets/nnetworks/ngrurnn.py | Python | mit | 6,144 |
"""Template tags relating to plugins."""
from django import template
import importlib
from django.utils.safestring import mark_safe
from happening.utils import convert_to_underscore
register = template.Library()
@register.simple_tag(takes_context=True)
def navigation_items(context, *params):
"""Render navigation items."""
from happening import plugins
return mark_safe(plugins.render_navigation_items(context.flatten()))
@register.simple_tag(takes_context=True)
def plugin_block(context, key, *params):
"""Make a space for plugins to place content in a template."""
from happening import plugins
return mark_safe(" ".join([p(context['request'], *params) for plugin_id,
p in plugins.plugin_blocks.get(key, [])
if plugins.plugin_enabled(plugin_id)]))
@register.filter()
def get_configuration(configuration_path, object=None):
"""Get a configuration variable.
Configuration path should be e.g. groups.MaxNumberOfMembers.
If there is an object for the configuration, pass this as a
second variable
"""
parts = configuration_path.rsplit(".", 1)
# The final part is the variable, everything before that is the module
p = importlib.import_module(parts[0])
return getattr(p, parts[1])(object).render()
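# Illustrative usage note (added sketch): as a registered filter, the function
# above is invoked from a Django template roughly as
#
#     {{ "groups.MaxNumberOfMembers"|get_configuration:group }}
#
# which imports the ``groups`` module, instantiates ``MaxNumberOfMembers``
# with ``group`` and renders the resulting configuration value.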
@register.filter()
def configuration_is_enabled(configuration_path, object=None):
"""Check if configuration variable is enabled.
Configuration path should be e.g. groups.MaxNumberOfMembers.
If there is an object for the configuration, pass this as a
second variable
"""
parts = configuration_path.rsplit(".", 1)
# The final part is the variable, everything before that is the module
p = importlib.import_module(parts[0])
return getattr(p, parts[1])(object).is_enabled()
@register.filter()
def properties_as_table(configuration, properties):
"""Format properties as a table."""
ret = []
for p in configuration:
k = convert_to_underscore(p['name'])
if k in properties and properties[k]:
if p['type'] == 'URLField':
properties[k] = '<a href="%s">%s</a>' % (properties[k],
properties[k])
ret.append(
"<tr><th>%s</th><td>%s</td></tr>" % (p['name'], properties[k]))
return mark_safe("".join(ret))
@register.filter
def theme_settings(site):
"""Output template variables in a css block."""
styles = [
"--%s: %s;" % (k, v["value"]) for k, v in
list(site.get_theme_settings().items())]
return mark_safe(
'<style type="text/css">:root {%s}</style>' % "".join(styles))
| jscott1989/happening | src/happening/templatetags/plugins.py | Python | mit | 2,896 |
import warnings
import pytest
from sqlalchemy.exc import SAWarning, SQLAlchemyError
from ichnaea.conftest import GB_LAT, GB_LON
from ichnaea.models import encode_mac, ReportSource
from ichnaea.models.wifi import WifiShard, WifiShard0, WifiShardF
from ichnaea import util
class TestWifiShard(object):
def test_shard_id(self):
assert WifiShard.shard_id("111101123456") == "0"
assert WifiShard.shard_id("0000f0123456") == "f"
assert WifiShard.shard_id("") is None
assert WifiShard.shard_id(None) is None
mac = encode_mac("0000f0123456")
assert WifiShard.shard_id(mac) == "f"
def test_shard_model(self):
assert WifiShard.shard_model("111101123456") is WifiShard0
assert WifiShard.shard_model("0000f0123456") is WifiShardF
assert WifiShard.shard_model("") is None
assert WifiShard.shard_model(None) is None
mac = encode_mac("0000f0123456")
assert WifiShard.shard_model(mac) is WifiShardF
def test_init(self, session):
wifi = WifiShard0(mac="111101123456")
session.add(wifi)
session.flush()
wifis = (
session.query(WifiShard0).filter(WifiShard0.mac == "111101123456")
).all()
assert wifis[0].mac == "111101123456"
def test_init_empty(self, session):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SAWarning)
session.add(WifiShard0())
with pytest.raises(SQLAlchemyError):
session.flush()
def test_init_fail(self, session):
session.add(WifiShard0(mac="abc"))
with pytest.raises(SQLAlchemyError):
session.flush()
def test_fields(self, session):
now = util.utcnow()
today = now.date()
session.add(
WifiShard.create(
mac="111101123456",
created=now,
modified=now,
lat=GB_LAT,
max_lat=GB_LAT,
min_lat=GB_LAT,
lon=GB_LON,
max_lon=GB_LON,
min_lon=GB_LON,
radius=200,
region="GB",
samples=10,
source=ReportSource.gnss,
weight=1.5,
last_seen=today,
block_first=today,
block_last=today,
block_count=1,
_raise_invalid=True,
)
)
session.flush()
wifi = session.query(WifiShard0).first()
assert wifi.mac == "111101123456"
assert wifi.created == now
assert wifi.modified == now
assert wifi.lat == GB_LAT
assert wifi.max_lat == GB_LAT
assert wifi.min_lat == GB_LAT
assert wifi.lon == GB_LON
assert wifi.max_lon == GB_LON
assert wifi.min_lon == GB_LON
assert wifi.radius == 200
assert wifi.region == "GB"
assert wifi.samples == 10
assert wifi.source == ReportSource.gnss
assert wifi.weight == 1.5
assert wifi.last_seen == today
assert wifi.block_first == today
assert wifi.block_last == today
assert wifi.block_count == 1
def test_mac_unhex(self, session):
stmt = 'insert into wifi_shard_0 (mac) values (unhex("111101123456"))'
session.execute(stmt)
session.flush()
wifi = session.query(WifiShard0).one()
assert wifi.mac == "111101123456"
def test_mac_hex(self, session):
session.add(WifiShard0(mac="111101123456"))
session.flush()
stmt = "select hex(`mac`) from wifi_shard_0"
row = session.execute(stmt).fetchone()
assert row == ("111101123456",)
| mozilla/ichnaea | ichnaea/models/tests/test_wifi.py | Python | apache-2.0 | 3,732 |
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.compat import unicode
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
Set,
NoneSet,
Sequence,
String,
Bool,
MinMax,
Integer
)
from openpyxl.descriptors.excel import (
HexBinary,
TextPoint,
Coordinate,
ExtensionList
)
from openpyxl.descriptors.nested import (
NestedInteger,
NestedString,
NestedText,
NestedValue,
EmptyTag
)
from openpyxl.xml.constants import DRAWING_NS
from .colors import ColorChoiceDescriptor
from .effect import *
from .fill import *
from .shapes import (
LineProperties,
Color,
Scene3D
)
from openpyxl.descriptors.excel import ExtensionList as OfficeArtExtensionList
class EmbeddedWAVAudioFile(Serialisable):
name = Typed(expected_type=String, allow_none=True)
def __init__(self,
name=None,
):
self.name = name
class Hyperlink(Serialisable):
invalidUrl = Typed(expected_type=String, allow_none=True)
action = Typed(expected_type=String, allow_none=True)
tgtFrame = Typed(expected_type=String, allow_none=True)
tooltip = Typed(expected_type=String, allow_none=True)
history = Typed(expected_type=Bool, allow_none=True)
highlightClick = Typed(expected_type=Bool, allow_none=True)
endSnd = Typed(expected_type=Bool, allow_none=True)
snd = Typed(expected_type=EmbeddedWAVAudioFile, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
def __init__(self,
invalidUrl=None,
action=None,
tgtFrame=None,
tooltip=None,
history=None,
highlightClick=None,
endSnd=None,
snd=None,
extLst=None,
):
self.invalidUrl = invalidUrl
self.action = action
self.tgtFrame = tgtFrame
self.tooltip = tooltip
self.history = history
self.highlightClick = highlightClick
self.endSnd = endSnd
self.snd = snd
self.extLst = extLst
class Font(Serialisable):
tagname = "latin"
namespace = DRAWING_NS
typeface = String()
panose = Typed(expected_type=HexBinary, allow_none=True)
pitchFamily = Typed(expected_type=MinMax, allow_none=True)
charset = Typed(expected_type=MinMax, allow_none=True)
def __init__(self,
typeface=None,
panose=None,
pitchFamily=None,
charset=None,
):
self.typeface = typeface
self.panose = panose
self.pitchFamily = pitchFamily
self.charset = charset
class CharacterProperties(Serialisable):
tagname = "defRPr"
namespace = DRAWING_NS
kumimoji = Bool(allow_none=True)
lang = String(allow_none=True)
altLang = String(allow_none=True)
sz = Integer()
b = Bool(allow_none=True)
i = Bool(allow_none=True)
u = NoneSet(values=(['words', 'sng', 'dbl', 'heavy', 'dotted',
'dottedHeavy', 'dash', 'dashHeavy', 'dashLong', 'dashLongHeavy',
'dotDash', 'dotDashHeavy', 'dotDotDash', 'dotDotDashHeavy', 'wavy',
'wavyHeavy', 'wavyDbl']))
strike = NoneSet(values=(['noStrike', 'sngStrike', 'dblStrike']))
kern = Integer(allow_none=True)
cap = NoneSet(values=(['small', 'all']))
spc = Integer(allow_none=True)
normalizeH = Bool(allow_none=True)
baseline = Integer(allow_none=True)
noProof = Bool(allow_none=True)
dirty = Bool(allow_none=True)
err = Bool(allow_none=True)
smtClean = Bool(allow_none=True)
smtId = Integer(allow_none=True)
bmk = String(allow_none=True)
ln = Typed(expected_type=LineProperties, allow_none=True)
highlight = Typed(expected_type=Color, allow_none=True)
latin = Typed(expected_type=Font, allow_none=True)
ea = Typed(expected_type=Font, allow_none=True)
cs = Typed(expected_type=Font, allow_none=True)
sym = Typed(expected_type=Font, allow_none=True)
hlinkClick = Typed(expected_type=Hyperlink, allow_none=True)
hlinkMouseOver = Typed(expected_type=Hyperlink, allow_none=True)
rtl = Bool(nested=True, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
# uses element group EG_FillProperties
noFill = EmptyTag(namespace=DRAWING_NS)
solidFill = ColorChoiceDescriptor()
gradFill = Typed(expected_type=GradientFillProperties, allow_none=True)
blipFill = Typed(expected_type=BlipFillProperties, allow_none=True)
pattFill = Typed(expected_type=PatternFillProperties, allow_none=True)
grpFill = EmptyTag(namespace=DRAWING_NS)
# uses element group EG_EffectProperties
effectLst = Typed(expected_type=EffectList, allow_none=True)
effectDag = Typed(expected_type=EffectContainer, allow_none=True)
# uses element group EG_TextUnderlineLine
uLnTx = EmptyTag()
uLn = Typed(expected_type=LineProperties, allow_none=True)
# uses element group EG_TextUnderlineFill
uFillTx = EmptyTag()
uFill = EmptyTag()
__elements__ = ('ln', 'highlight', 'latin', 'ea', 'cs', 'sym',
'hlinkClick', 'hlinkMouseOver', 'rtl', 'noFill', 'solidFill', 'gradFill',
'blipFill', 'pattFill', 'grpFill', 'effectLst', 'effectDag', 'uLnTx',
'uLn', 'uFillTx', 'uFill')
def __init__(self,
kumimoji=None,
lang=None,
altLang=None,
sz=None,
b=None,
i=None,
u=None,
strike=None,
kern=None,
cap=None,
spc=None,
normalizeH=None,
baseline=None,
noProof=None,
dirty=None,
err=None,
smtClean=None,
smtId=None,
bmk=None,
ln=None,
highlight=None,
latin=None,
ea=None,
cs=None,
sym=None,
hlinkClick=None,
hlinkMouseOver=None,
rtl=None,
extLst=None,
noFill=None,
solidFill=None,
gradFill=None,
blipFill=None,
pattFill=None,
grpFill=None,
effectLst=None,
effectDag=None,
uLnTx=None,
uLn=None,
uFillTx=None,
uFill=None,
):
self.kumimoji = kumimoji
self.lang = lang
self.altLang = altLang
self.sz = sz
self.b = b
self.i = i
self.u = u
self.strike = strike
self.kern = kern
self.cap = cap
self.spc = spc
self.normalizeH = normalizeH
self.baseline = baseline
self.noProof = noProof
self.dirty = dirty
self.err = err
self.smtClean = smtClean
self.smtId = smtId
self.bmk = bmk
self.ln = ln
self.highlight = highlight
self.latin = latin
self.ea = ea
self.cs = cs
self.sym = sym
self.hlinkClick = hlinkClick
self.hlinkMouseOver = hlinkMouseOver
self.rtl = rtl
self.noFill = noFill
self.solidFill = solidFill
self.gradFill = gradFill
self.blipFill = blipFill
self.pattFill = pattFill
self.grpFill = grpFill
self.effectLst = effectLst
self.effectDag = effectDag
self.uLnTx = uLnTx
self.uLn = uLn
self.uFillTx = uFillTx
self.uFill = uFill
class TabStop(Serialisable):
pos = Typed(expected_type=Coordinate, allow_none=True)
algn = Typed(expected_type=Set(values=(['l', 'ctr', 'r', 'dec'])))
def __init__(self,
pos=None,
algn=None,
):
self.pos = pos
self.algn = algn
class TabStopList(Serialisable):
tab = Typed(expected_type=TabStop, allow_none=True)
def __init__(self,
tab=None,
):
self.tab = tab
class Spacing(Serialisable):
spcPct = NestedInteger()
spcPts = NestedInteger()
__elements__ = ('spcPct', 'spcPts')
def __init__(self,
spcPct=None,
spcPts=None,
):
self.spcPct = spcPct
self.spcPts = spcPts
class AutonumberBullet(Serialisable):
type = Set(values=(['alphaLcParenBoth', 'alphaUcParenBoth',
'alphaLcParenR', 'alphaUcParenR', 'alphaLcPeriod', 'alphaUcPeriod',
'arabicParenBoth', 'arabicParenR', 'arabicPeriod', 'arabicPlain',
'romanLcParenBoth', 'romanUcParenBoth', 'romanLcParenR', 'romanUcParenR',
'romanLcPeriod', 'romanUcPeriod', 'circleNumDbPlain',
'circleNumWdBlackPlain', 'circleNumWdWhitePlain', 'arabicDbPeriod',
'arabicDbPlain', 'ea1ChsPeriod', 'ea1ChsPlain', 'ea1ChtPeriod',
'ea1ChtPlain', 'ea1JpnChsDbPeriod', 'ea1JpnKorPlain', 'ea1JpnKorPeriod',
'arabic1Minus', 'arabic2Minus', 'hebrew2Minus', 'thaiAlphaPeriod',
'thaiAlphaParenR', 'thaiAlphaParenBoth', 'thaiNumPeriod',
'thaiNumParenR', 'thaiNumParenBoth', 'hindiAlphaPeriod',
'hindiNumPeriod', 'hindiNumParenR', 'hindiAlpha1Period']))
startAt = Integer()
def __init__(self,
type=None,
startAt=None,
):
self.type = type
self.startAt = startAt
class ParagraphProperties(Serialisable):
tagname = "pPr"
namespace = DRAWING_NS
marL = Integer(allow_none=True)
marR = Integer(allow_none=True)
lvl = Integer(allow_none=True)
indent = Integer(allow_none=True)
algn = NoneSet(values=(['l', 'ctr', 'r', 'just', 'justLow', 'dist', 'thaiDist']))
defTabSz = Integer(expected_type=Coordinate, allow_none=True)
rtl = Bool(allow_none=True)
eaLnBrk = Bool(allow_none=True)
fontAlgn = NoneSet(values=(['auto', 't', 'ctr', 'base', 'b']))
latinLnBrk = Bool(allow_none=True)
hangingPunct = Bool(allow_none=True)
# uses element group EG_TextBulletColor
# uses element group EG_TextBulletSize
# uses element group EG_TextBulletTypeface
# uses element group EG_TextBullet
lnSpc = Typed(expected_type=Spacing, allow_none=True)
spcBef = Typed(expected_type=Spacing, allow_none=True)
spcAft = Typed(expected_type=Spacing, allow_none=True)
tabLst = Typed(expected_type=TabStopList, allow_none=True)
defRPr = Typed(expected_type=CharacterProperties, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
buClrTx = EmptyTag()
buClr = Typed(expected_type=Color, allow_none=True)
buSzTx = EmptyTag()
buSzPct = NestedInteger(allow_none=True)
buSzPts = NestedInteger(allow_none=True)
buFontTx = EmptyTag()
buFont = Typed(expected_type=Font, allow_none=True)
buNone = EmptyTag()
buAutoNum = EmptyTag()
buChar = NestedValue(expected_type=unicode, attribute="char", allow_none=True)
buBlip = NestedValue(expected_type=Blip, attribute="blip", allow_none=True)
__elements__ = ('lnSpc', 'spcBef', 'spcAft', 'tabLst', 'defRPr',
'buClrTx', 'buClr', 'buSzTx', 'buSzPct', 'buSzPts', 'buFontTx', 'buFont',
'buNone', 'buAutoNum', 'buChar', 'buBlip')
def __init__(self,
marL=None,
marR=None,
lvl=None,
indent=None,
algn=None,
defTabSz=None,
rtl=None,
eaLnBrk=None,
fontAlgn=None,
latinLnBrk=None,
hangingPunct=None,
lnSpc=None,
spcBef=None,
spcAft=None,
tabLst=None,
defRPr=None,
extLst=None,
buClrTx=None,
buClr=None,
buSzTx=None,
buSzPct=None,
buSzPts=None,
buFontTx=None,
buFont=None,
buNone=None,
buAutoNum=None,
buChar=None,
buBlip=None,
):
self.marL = marL
self.marR = marR
self.lvl = lvl
self.indent = indent
self.algn = algn
self.defTabSz = defTabSz
self.rtl = rtl
self.eaLnBrk = eaLnBrk
self.fontAlgn = fontAlgn
self.latinLnBrk = latinLnBrk
self.hangingPunct = hangingPunct
self.lnSpc = lnSpc
self.spcBef = spcBef
self.spcAft = spcAft
self.tabLst = tabLst
self.defRPr = defRPr
self.buClrTx = buClrTx
self.buClr = buClr
self.buSzTx = buSzTx
self.buSzPct = buSzPct
self.buSzPts = buSzPts
self.buFontTx = buFontTx
self.buFont = buFont
self.buNone = buNone
self.buAutoNum = buAutoNum
self.buChar = buChar
self.buBlip = buBlip
class ListStyle(Serialisable):
tagname = "lstStyle"
namespace = DRAWING_NS
defPPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl1pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl2pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl3pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl4pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl5pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl6pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl7pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl8pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
lvl9pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
__elements__ = ("defPPr", "lvl1pPr", "lvl2pPr", "lvl3pPr", "lvl4pPr",
"lvl5pPr", "lvl6pPr", "lvl7pPr", "lvl8pPr", "lvl9pPr")
def __init__(self,
defPPr=None,
lvl1pPr=None,
lvl2pPr=None,
lvl3pPr=None,
lvl4pPr=None,
lvl5pPr=None,
lvl6pPr=None,
lvl7pPr=None,
lvl8pPr=None,
lvl9pPr=None,
extLst=None,
):
self.defPPr = defPPr
self.lvl1pPr = lvl1pPr
self.lvl2pPr = lvl2pPr
self.lvl3pPr = lvl3pPr
self.lvl4pPr = lvl4pPr
self.lvl5pPr = lvl5pPr
self.lvl6pPr = lvl6pPr
self.lvl7pPr = lvl7pPr
self.lvl8pPr = lvl8pPr
self.lvl9pPr = lvl9pPr
class RegularTextRun(Serialisable):
tagname = "r"
namespace = DRAWING_NS
rPr = Typed(expected_type=CharacterProperties, allow_none=True)
properties = Alias("rPr")
t = NestedText(expected_type=unicode, allow_none=True)
value = Alias("t")
__elements__ = ('rPr', 't')
def __init__(self,
rPr=None,
t=None,
):
self.rPr = rPr
self.t = t
class LineBreak(Serialisable):
rPr = Typed(expected_type=CharacterProperties, allow_none=True)
__elements__ = ('rPr',)
def __init__(self,
rPr=None,
):
self.rPr = rPr
class TextField(Serialisable):
id = String()
type = String(allow_none=True)
rPr = Typed(expected_type=CharacterProperties, allow_none=True)
pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
t = Typed(expected_type=String, allow_none=True)
__elements__ = ('rPr', 'pPr')
def __init__(self,
id=None,
type=None,
rPr=None,
pPr=None,
t=None,
):
self.id = id
self.type = type
self.rPr = rPr
self.pPr = pPr
self.t = t
class Paragraph(Serialisable):
tagname = "p"
namespace = DRAWING_NS
# uses element group EG_TextRun
pPr = Typed(expected_type=ParagraphProperties, allow_none=True)
properties = Alias("pPr")
endParaRPr = Typed(expected_type=CharacterProperties, allow_none=True)
r = Typed(expected_type=RegularTextRun, allow_none=True)
text = Alias('r')
br = Typed(expected_type=LineBreak, allow_none=True)
fld = Typed(expected_type=TextField, allow_none=True)
__elements__ = ('pPr', 'endParaRPr', 'r', 'br', 'fld')
def __init__(self,
pPr=None,
endParaRPr=None,
r=None,
br=None,
fld=None,
):
self.pPr = pPr
self.endParaRPr = endParaRPr
if r is None:
r = RegularTextRun()
self.r = r
self.br = br
self.fld = fld
class GeomGuide(Serialisable):
name = Typed(expected_type=String())
fmla = Typed(expected_type=String())
def __init__(self,
name=None,
fmla=None,
):
self.name = name
self.fmla = fmla
class GeomGuideList(Serialisable):
gd = Sequence(expected_type=GeomGuide, allow_none=True)
def __init__(self,
gd=None,
):
self.gd = gd
class PresetTextShape(Serialisable):
prst = Typed(expected_type=Set(values=(
['textNoShape', 'textPlain','textStop', 'textTriangle', 'textTriangleInverted', 'textChevron',
'textChevronInverted', 'textRingInside', 'textRingOutside', 'textArchUp',
'textArchDown', 'textCircle', 'textButton', 'textArchUpPour',
'textArchDownPour', 'textCirclePour', 'textButtonPour', 'textCurveUp',
'textCurveDown', 'textCanUp', 'textCanDown', 'textWave1', 'textWave2',
'textDoubleWave1', 'textWave4', 'textInflate', 'textDeflate',
'textInflateBottom', 'textDeflateBottom', 'textInflateTop',
'textDeflateTop', 'textDeflateInflate', 'textDeflateInflateDeflate',
'textFadeRight', 'textFadeLeft', 'textFadeUp', 'textFadeDown',
'textSlantUp', 'textSlantDown', 'textCascadeUp', 'textCascadeDown'
]
)))
avLst = Typed(expected_type=GeomGuideList, allow_none=True)
def __init__(self,
prst=None,
avLst=None,
):
self.prst = prst
self.avLst = avLst
class TextNormalAutofit(Serialisable):
fontScale = Integer()
lnSpcReduction = Integer()
def __init__(self,
fontScale=None,
lnSpcReduction=None,
):
self.fontScale = fontScale
self.lnSpcReduction = lnSpcReduction
class RichTextProperties(Serialisable):
tagname = "bodyPr"
namespace = DRAWING_NS
rot = Integer(allow_none=True)
spcFirstLastPara = Bool(allow_none=True)
vertOverflow = NoneSet(values=(['overflow', 'ellipsis', 'clip']))
horzOverflow = NoneSet(values=(['overflow', 'clip']))
vert = NoneSet(values=(['horz', 'vert', 'vert270', 'wordArtVert',
'eaVert', 'mongolianVert', 'wordArtVertRtl']))
wrap = NoneSet(values=(['none', 'square']))
lIns = Integer(allow_none=True)
tIns = Integer(allow_none=True)
rIns = Integer(allow_none=True)
bIns = Integer(allow_none=True)
numCol = Integer(allow_none=True)
spcCol = Integer(allow_none=True)
rtlCol = Bool(allow_none=True)
fromWordArt = Bool(allow_none=True)
anchor = NoneSet(values=(['t', 'ctr', 'b', 'just', 'dist']))
anchorCtr = Bool(allow_none=True)
forceAA = Bool(allow_none=True)
upright = Bool(allow_none=True)
compatLnSpc = Bool(allow_none=True)
prstTxWarp = Typed(expected_type=PresetTextShape, allow_none=True)
scene3d = Typed(expected_type=Scene3D, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
noAutofit = EmptyTag()
normAutofit = EmptyTag()
spAutoFit = EmptyTag()
flatTx = NestedInteger(attribute="z", allow_none=True)
__elements__ = ('prstTxWarp', 'scene3d', 'noAutofit', 'normAutofit', 'spAutoFit')
def __init__(self,
rot=None,
spcFirstLastPara=None,
vertOverflow=None,
horzOverflow=None,
vert=None,
wrap=None,
lIns=None,
tIns=None,
rIns=None,
bIns=None,
numCol=None,
spcCol=None,
rtlCol=None,
fromWordArt=None,
anchor=None,
anchorCtr=None,
forceAA=None,
upright=None,
compatLnSpc=None,
prstTxWarp=None,
scene3d=None,
extLst=None,
noAutofit=None,
normAutofit=None,
spAutoFit=None,
flatTx=None,
):
self.rot = rot
self.spcFirstLastPara = spcFirstLastPara
self.vertOverflow = vertOverflow
self.horzOverflow = horzOverflow
self.vert = vert
self.wrap = wrap
self.lIns = lIns
self.tIns = tIns
self.rIns = rIns
self.bIns = bIns
self.numCol = numCol
self.spcCol = spcCol
self.rtlCol = rtlCol
self.fromWordArt = fromWordArt
self.anchor = anchor
self.anchorCtr = anchorCtr
self.forceAA = forceAA
self.upright = upright
self.compatLnSpc = compatLnSpc
self.prstTxWarp = prstTxWarp
self.scene3d = scene3d
self.noAutofit = noAutofit
self.normAutofit = normAutofit
self.spAutoFit = spAutoFit
self.flatTx = flatTx
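# --- Illustrative usage (added sketch, not part of the original module; the
# strings below are arbitrary example values) ---
if __name__ == '__main__':
    run = RegularTextRun(t="Chart title")
    para = Paragraph(r=run, pPr=ParagraphProperties(algn="ctr"))
    print(para.r.t)   # Chart title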
| saukrIppl/seahub | thirdpart/openpyxl-2.3.0-py2.7.egg/openpyxl/drawing/text.py | Python | apache-2.0 | 22,428 |
from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
from tests.support.fixtures import create_dialog
from tests.support.inline import inline
alert_doc = inline("<script>window.alert()</script>")
def minimize(session):
return session.transport.send("POST", "session/%s/window/minimize" % session.session_id)
# 10.7.4 Minimize Window
def test_no_browsing_context(session, create_window):
"""
1. If the current top-level browsing context is no longer open,
return error with error code no such window.
"""
session.window_handle = create_window()
session.close()
response = minimize(session)
assert_error(response, "no such window")
def test_handle_prompt_dismiss_and_notify():
"""TODO"""
def test_handle_prompt_accept_and_notify():
"""TODO"""
def test_handle_prompt_ignore():
"""TODO"""
def test_handle_prompt_accept(new_session):
"""
2. Handle any user prompts and return its value if it is an error.
[...]
In order to handle any user prompts a remote end must take the
following steps:
[...]
2. Perform the following substeps based on the current session's
user prompt handler:
[...]
- accept state
Accept the current user prompt.
"""
_, session = new_session({"alwaysMatch": {"unhandledPromptBehavior": "accept"}})
session.url = inline("<title>WD doc title</title>")
create_dialog(session)("alert", text="dismiss #1", result_var="dismiss1")
response = minimize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #1")
create_dialog(session)("confirm", text="dismiss #2", result_var="dismiss2")
response = minimize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #2")
create_dialog(session)("prompt", text="dismiss #3", result_var="dismiss3")
response = minimize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #3")
def test_handle_prompt_missing_value(session, create_dialog):
"""
2. Handle any user prompts and return its value if it is an error.
[...]
In order to handle any user prompts a remote end must take the
following steps:
[...]
2. Perform the following substeps based on the current session's
user prompt handler:
[...]
- missing value default state
1. Dismiss the current user prompt.
2. Return error with error code unexpected alert open.
"""
session.url = inline("<title>WD doc title</title>")
create_dialog("alert", text="dismiss #1", result_var="dismiss1")
response = minimize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #1")
create_dialog("confirm", text="dismiss #2", result_var="dismiss2")
response = minimize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #2")
create_dialog("prompt", text="dismiss #3", result_var="dismiss3")
response = minimize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #3")
def test_fully_exit_fullscreen(session):
"""
4. Fully exit fullscreen.
[...]
To fully exit fullscreen a document document, run these steps:
1. If document's fullscreen element is null, terminate these steps.
2. Unfullscreen elements whose fullscreen flag is set, within
document's top layer, except for document's fullscreen element.
3. Exit fullscreen document.
"""
session.window.fullscreen()
assert session.execute_script("return window.fullScreen") is True
response = minimize(session)
assert_success(response)
assert session.execute_script("return window.fullScreen") is False
assert session.execute_script("return document.hidden") is True
def test_minimize(session):
"""
5. Iconify the window.
[...]
To iconify the window, given an operating system level window with an
associated top-level browsing context, run implementation-specific
steps to iconify, minimize, or hide the window from the visible
screen. Do not return from this operation until the visibility state
of the top-level browsing context's active document has reached the
hidden state, or until the operation times out.
"""
assert not session.execute_script("return document.hidden")
response = minimize(session)
assert_success(response)
assert session.execute_script("return document.hidden")
def test_payload(session):
"""
6. Return success with the JSON serialization of the current top-level
browsing context's window rect.
[...]
A top-level browsing context's window rect is defined as a
dictionary of the screenX, screenY, width and height attributes of
the WindowProxy. Its JSON representation is the following:
"x"
WindowProxy's screenX attribute.
"y"
WindowProxy's screenY attribute.
"width"
Width of the top-level browsing context's outer dimensions,
including any browser chrome and externally drawn window
decorations in CSS reference pixels.
"height"
Height of the top-level browsing context's outer dimensions,
including any browser chrome and externally drawn window
decorations in CSS reference pixels.
"""
assert not session.execute_script("return document.hidden")
response = minimize(session)
assert response.status == 200
assert isinstance(response.body["value"], dict)
value = response.body["value"]
assert "width" in value
assert "height" in value
assert "x" in value
assert "y" in value
assert isinstance(value["width"], int)
assert isinstance(value["height"], int)
assert isinstance(value["x"], int)
assert isinstance(value["y"], int)
assert session.execute_script("return document.hidden")
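# Illustrative note (added): a successful response body therefore looks
# roughly like {"value": {"x": 0, "y": 0, "width": 800, "height": 600}},
# with the exact numbers depending on the window manager.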
def test_minimize_twice_is_idempotent(session):
assert not session.execute_script("return document.hidden")
first_response = minimize(session)
assert_success(first_response)
assert session.execute_script("return document.hidden")
second_response = minimize(session)
assert_success(second_response)
assert session.execute_script("return document.hidden")
| n0max/servo | tests/wpt/web-platform-tests/webdriver/tests/minimize_window.py | Python | mpl-2.0 | 6,438 |
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the Cylc GlobalConfig object."""
from cylc.flow.cfgspec.globalcfg import GlobalConfig, SPEC
import pytest
TEST_CONF = '''
[platforms]
[[foo]]
hosts = of_morgoth
[platform groups]
[[BAR]]
platforms = mario, sonic
[task events]
# Checking that config items that aren't platforms or platform groups
# are not output.
'''
@pytest.fixture
def fake_global_conf(tmp_path):
glblcfg = GlobalConfig(SPEC)
(tmp_path / 'global.cylc').write_text(TEST_CONF)
glblcfg.loadcfg(tmp_path / 'global.cylc')
return glblcfg
def test_dump_platform_names(capsys, fake_global_conf):
"""It dumps lists of platform names, nothing else."""
fake_global_conf.dump_platform_names(fake_global_conf)
stdout, _ = capsys.readouterr()
expected = 'localhost\nfoo\nBAR\n'
assert stdout == expected
def test_dump_platform_details(capsys, fake_global_conf):
"""It dumps lists of platform spec."""
fake_global_conf.dump_platform_details(fake_global_conf)
out, _ = capsys.readouterr()
expected = (
'[platforms]\n [[foo]]\n hosts = of_morgoth\n'
'[platform groups]\n [[BAR]]\n platforms = mario, sonic\n'
)
assert expected == out
| oliver-sanders/cylc | tests/unit/cfgspec/test_globalcfg.py | Python | gpl-3.0 | 2,047 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def get_notification_config():
notifications = { "for_doctype":
{
"Issue": {"status": "Open"},
"Warranty Claim": {"status": "Open"},
"Task": {"status": ("in", ("Open", "Overdue"))},
"Project": {"status": "Open"},
"Item": {"total_projected_qty": ("<", 0)},
"Lead": {"status": "Open"},
"Contact": {"status": "Open"},
"Opportunity": {"status": "Open"},
"Quotation": {"docstatus": 0},
"Sales Order": {
"status": ("not in", ("Completed", "Closed")),
"docstatus": ("<", 2)
},
"Journal Entry": {"docstatus": 0},
"Sales Invoice": {
"outstanding_amount": (">", 0),
"docstatus": ("<", 2)
},
"Purchase Invoice": {
"outstanding_amount": (">", 0),
"docstatus": ("<", 2)
},
"Payment Entry": {"docstatus": 0},
"Leave Application": {"status": "Open"},
"Expense Claim": {"approval_status": "Draft"},
"Job Applicant": {"status": "Open"},
"Delivery Note": {
"status": ("not in", ("Completed", "Closed")),
"docstatus": ("<", 2)
},
"Stock Entry": {"docstatus": 0},
"Material Request": {
"docstatus": ("<", 2),
"status": ("not in", ("Stopped",)),
"per_ordered": ("<", 100)
},
"Request for Quotation": { "docstatus": 0 },
"Supplier Quotation": {"docstatus": 0},
"Purchase Order": {
"status": ("not in", ("Completed", "Closed")),
"docstatus": ("<", 2)
},
"Purchase Receipt": {
"status": ("not in", ("Completed", "Closed")),
"docstatus": ("<", 2)
},
"Production Order": { "status": ("in", ("Draft", "Not Started", "In Process")) },
"BOM": {"docstatus": 0},
"Timesheet": {"status": "Draft"},
"Lab Test": {"docstatus": 0},
"Sample Collection": {"docstatus": 0},
"Patient Appointment": {"status": "Open"},
"Consultation": {"docstatus": 0}
},
"targets": {
"Company": {
"filters" : { "monthly_sales_target": ( ">", 0 ) },
"target_field" : "monthly_sales_target",
"value_field" : "total_monthly_sales"
}
}
}
doctype = [d for d in notifications.get('for_doctype')]
for doc in frappe.get_all('DocType',
fields= ["name"], filters = {"name": ("not in", doctype), 'is_submittable': 1}):
notifications["for_doctype"][doc.name] = {"docstatus": 0}
return notifications
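# Illustrative usage note (added sketch; ``frappe.db.count`` is assumed from
# the framework, not shown here): downstream code can turn an entry such as
# {"Task": {"status": ("in", ("Open", "Overdue"))}} into an open-document
# count with roughly:
#
#     frappe.db.count("Task", filters={"status": ("in", ("Open", "Overdue"))})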
| indictranstech/erpnext | erpnext/startup/notifications.py | Python | agpl-3.0 | 2,428 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
class FakeConnection:
is_fake_test_connection = True
_waiting_for_interrupt = False
def __init__(self, testcase, name, step, commands_numbers_to_interrupt):
self.testcase = testcase
self.name = name
self.step = step
self._commands_numbers_to_interrupt = commands_numbers_to_interrupt
self._block_on_interrupt = False
self._next_command_number = 0
self._blocked_deferreds = []
@defer.inlineCallbacks
def remoteStartCommand(self, remote_command, builder_name, command_id, command_name, args):
self._waiting_for_interrupt = False
if self._next_command_number in self._commands_numbers_to_interrupt:
self._waiting_for_interrupt = True
yield self.step.interrupt('interrupt reason')
if self._waiting_for_interrupt:
raise RuntimeError("Interrupted step, but command was not interrupted")
self._next_command_number += 1
yield self.testcase._connection_remote_start_command(remote_command, self, builder_name)
        # running behaviors may still attempt to interrupt the command
if self._waiting_for_interrupt:
raise RuntimeError("Interrupted step, but command was not interrupted")
def remoteInterruptCommand(self, builder_name, command_id, why):
if not self._waiting_for_interrupt:
raise RuntimeError("Got interrupt, but FakeConnection was not expecting it")
self._waiting_for_interrupt = False
if self._block_on_interrupt:
d = defer.Deferred()
self._blocked_deferreds.append(d)
return d
else:
return defer.succeed(None)
def set_expect_interrupt(self):
if self._waiting_for_interrupt:
raise RuntimeError("Already expecting interrupt but got additional request")
self._waiting_for_interrupt = True
def set_block_on_interrupt(self):
self._block_on_interrupt = True
def unblock_waiters(self):
for d in self._blocked_deferreds:
d.callback(None)
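# Illustrative usage (added sketch; the names below are hypothetical): a test
# might wire the fake up as
#
#     conn = FakeConnection(testcase, 'worker1', step,
#                           commands_numbers_to_interrupt={0})
#     conn.set_block_on_interrupt()
#     ...run the build step (command 0 is interrupted and blocks)...
#     conn.unblock_waiters()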
| pmisik/buildbot | master/buildbot/test/fake/connection.py | Python | gpl-2.0 | 2,834 |
#
# Martin Gracik <mgracik@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
class FC3_TestCase(CommandTest):
command = "url"
def runTest(self):
# pass
self.assert_parse("url --url=http://domain.com", "url --url=\"http://domain.com\"\n")
self.assertFalse(self.assert_parse("url --url=http://domain.com") == None)
self.assertTrue(self.assert_parse("url --url=http://domainA.com") != \
self.assert_parse("url --url=http://domainB.com"))
self.assertFalse(self.assert_parse("url --url=http://domainA.com") == \
self.assert_parse("url --url=http://domainB.com"))
# fail
# missing required option --url
self.assert_parse_error("url")
self.assert_parse_error("url --url")
# extra test coverage
cmd = self.handler().commands[self.command]
cmd.seen = False
self.assertEqual(cmd.__str__(), "")
class F13_TestCase(FC3_TestCase):
def runTest(self):
# run FC3 test case
FC3_TestCase.runTest(self)
# pass
self.assert_parse("url --url=http://someplace/somewhere --proxy=http://wherever/other",
"url --url=\"http://someplace/somewhere\" --proxy=\"http://wherever/other\"\n")
self.assertTrue(self.assert_parse("url --url=http://domain.com --proxy=http://proxy.com") == \
self.assert_parse("url --url=http://domain.com --proxy=http://proxy.com"))
self.assertFalse(self.assert_parse("url --url=http://domain.com --proxy=http://proxyA.com") == \
self.assert_parse("url --url=http://domain.com --proxy=http://proxyB.com"))
# fail
self.assert_parse_error("cdrom --proxy=http://someplace/somewhere")
self.assert_parse_error("url --url=http://someplace/somewhere --proxy")
self.assert_parse_error("url --proxy=http://someplace/somewhere")
class F14_TestCase(F13_TestCase):
def runTest(self):
# run FC6 test case
F13_TestCase.runTest(self)
# pass
self.assert_parse("url --url=https://someplace/somewhere --noverifyssl",
"url --url=\"https://someplace/somewhere\" --noverifyssl\n")
self.assertTrue(self.assert_parse("url --url=https://domain.com --noverifyssl") == \
self.assert_parse("url --url=https://domain.com --noverifyssl"))
self.assertFalse(self.assert_parse("url --url=https://domain.com") == \
self.assert_parse("url --url=https://domain.com --noverifyssl"))
# fail
self.assert_parse_error("cdrom --noverifyssl")
class F18_TestCase(F14_TestCase):
def runTest(self):
# run F14 test case.
F14_TestCase.runTest(self)
# pass
self.assert_parse("url --mirrorlist=http://www.wherever.com/mirror",
"url --mirrorlist=\"http://www.wherever.com/mirror\"\n")
self.assertTrue(self.assert_parse("url --mirrorlist=https://domain.com") == \
self.assert_parse("url --mirrorlist=https://domain.com"))
self.assertFalse(self.assert_parse("url --url=https://domain.com") == \
self.assert_parse("url --mirrorlist=https://domain.com"))
# fail
# missing one of required options --url or --mirrorlist
self.assert_parse_error("url")
self.assert_parse_error("url --mirrorlist")
# It's --url, not --baseurl.
self.assert_parse_error("url --baseurl=www.wherever.com")
# only one of --url or --mirrorlist may be specified
self.assert_parse_error("url --url=www.wherever.com --mirrorlist=www.wherever.com")
# extra test coverage
cmd = self.handler().commands[self.command]
cmd.seen = True
cmd.url = None
cmd.mirrorlist = None
self.assertEqual(cmd.__str__(), "# Use network installation\n\n")
if __name__ == "__main__":
unittest.main()
| jikortus/pykickstart | tests/commands/url.py | Python | gpl-2.0 | 4,945 |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executor implementation, these objects create threads and process jobs in them"""
from __future__ import print_function
from threading import Thread
from .color import colorize_cmake
from .common import remove_ansi_escape
from .common import run_command
class ExecutorEvent(object):
"""This is returned by the Executor when an event occurs
Events can be jobs starting/finishing, commands starting/failing/finishing,
commands producing output (each line is an event), or when the executor
    quits or fails.
"""
def __init__(self, executor_id, event_type, data, package):
self.executor_id = executor_id
self.event_type = event_type
self.data = data
self.package = package
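# A minimal usage sketch (hypothetical names, for illustration only):
#
#     event = ExecutorEvent(executor_id=0, event_type='command_log',
#                           data={'message': 'Building foo\n'}, package='foo')
#     comm_queue.put(event)  # the build loop pops and dispatches these events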
class Executor(Thread):
"""Threaded executor for the parallel catkin build jobs"""
name_prefix = 'build'
def __init__(self, executor_id, context, comm_queue, job_queue, install_lock):
super(Executor, self).__init__()
self.name = self.name_prefix + '-' + str(executor_id + 1)
self.executor_id = executor_id
self.c = context
self.queue = comm_queue
self.jobs = job_queue
self.current_job = None
self.install_space_lock = install_lock
def job_started(self, job):
self.queue.put(ExecutorEvent(self.executor_id, 'job_started', {}, job.package.name))
def command_started(self, cmd, location):
package_name = '' if self.current_job is None else self.current_job.package.name
data = {
'cmd': cmd,
'location': location
}
self.queue.put(ExecutorEvent(self.executor_id, 'command_started', data, package_name))
def command_log(self, msg):
package_name = '' if self.current_job is None else self.current_job.package.name
data = {'message': msg}
self.queue.put(ExecutorEvent(self.executor_id, 'command_log', data, package_name))
def command_failed(self, cmd, location, retcode):
package_name = '' if self.current_job is None else self.current_job.package.name
data = {
'cmd': cmd,
'location': location,
'retcode': retcode
}
self.queue.put(ExecutorEvent(self.executor_id, 'command_failed', data, package_name))
def command_finished(self, cmd, location, retcode):
package_name = '' if self.current_job is None else self.current_job.package.name
data = {
'cmd': cmd,
'location': location,
'retcode': retcode
}
self.queue.put(ExecutorEvent(self.executor_id, 'command_finished', data, package_name))
def job_finished(self, job):
self.queue.put(ExecutorEvent(self.executor_id, 'job_finished', {}, job.package.name))
def quit(self, exc=None):
package_name = '' if self.current_job is None else self.current_job.package.name
data = {
'reason': 'normal' if exc is None else 'exception',
'exc': str(exc)
}
self.queue.put(ExecutorEvent(self.executor_id, 'exit', data, package_name))
def run(self):
try:
# Until exit
while True:
# Get a job off the queue
self.current_job = self.jobs.get()
# If the job is None, then we should shutdown
if self.current_job is None:
                    # Notify shutdown
self.quit()
break
# Notify that a new job was started
self.job_started(self.current_job)
# Execute each command in the job
for command in self.current_job:
install_space_locked = False
if command.lock_install_space:
self.install_space_lock.acquire()
install_space_locked = True
try:
                        # Log that the command is being run
self.command_started(command, command.location)
# Receive lines from the running command
for line in run_command(command.cmd, cwd=command.location):
# If it is a string, log it
if isinstance(line, str):
# Ensure it is not just ansi escape characters
if remove_ansi_escape(line).strip():
for sub_line in line.splitlines(True): # keepends=True
if sub_line:
if command.stage_name == 'cmake':
sub_line = colorize_cmake(sub_line)
self.command_log(sub_line)
else:
# Otherwise it is a return code
retcode = line
# If the return code is not zero
if retcode != 0:
# Log the failure (the build loop will dispatch None's)
self.command_failed(command, command.location, retcode)
# Try to consume and throw away any and all remaining jobs in the queue
while self.jobs.get() is not None:
pass
# Once we get our None, quit
self.quit()
return
else:
self.command_finished(command, command.location, retcode)
finally:
if install_space_locked:
self.install_space_lock.release()
self.job_finished(self.current_job)
except KeyboardInterrupt:
self.quit()
except Exception as exc:
import traceback
self.quit(traceback.format_exc() + str(exc))
raise
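# A minimal wiring sketch (hypothetical, not part of this module); assumes a
# Python 2 Queue and a `context` object as used by catkin build:
#
#     from Queue import Queue
#     from threading import Lock
#     comm_queue, job_queue, lock = Queue(), Queue(), Lock()
#     workers = [Executor(i, context, comm_queue, job_queue, lock)
#                for i in range(4)]
#     for w in workers:
#         w.start()
#     # ... enqueue jobs, then put one None per worker to shut them down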
| NikolausDemmel/catkin_tools | catkin_tools/verbs/catkin_build/executor.py | Python | apache-2.0 | 6,768 |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (C) 2005-2009 Håvard Gulldahl
# <havard@lurtgjort.no>
#
# License: GPL2
#
# $Id$
###########################################################################
import fakturakomponenter
import types, sys, time
import logging
class fakturaHandling(fakturakomponenter.fakturaKomponent):#(fakturabibliotek.fakturaKomponent):
_tabellnavn = "Handling"
def __init__(self, db, Id = None, navn = None):
self.db = db
self.navn = navn
if Id is None:
Id = self.nyId()
self._id = Id
def nyId(self):
self.c.execute("INSERT INTO %s (ID, navn) VALUES (NULL, ?)" % self._tabellnavn, (self.navn,))
self.db.commit()
return self.c.lastrowid
class historiskHandling:
handlingID = 0
dato = 0
suksess = 0
navn = None
forklaring = ''
ordreID = 0
db = None
def handling(self):
return fakturaHandling(self.db, self.handlingID)
def settHandling(self, handling):
assert isinstance(handling, fakturaHandling)
self.handlingID = handling._id
return True
def finnHandling(self, navn):
assert type(navn) in types.StringTypes
self.c.execute('SELECT ID FROM Handling WHERE navn=?', (navn,))
return fakturaHandling(self.db, self.c.fetchone()[0], navn)
def registrerHandling(self):
        # writes the action record to the database
self.c.execute("INSERT INTO Historikk (ordreID, dato, handlingID, suksess, forklaring) VALUES (?,?,?,?,?)", (self.ordreID, self.dato, self.handlingID, (self.suksess and 1) or 0, self.forklaring))
self.db.commit()
def __init__(self, ordre, suksess, forklaring=None):
assert isinstance(ordre, fakturakomponenter.fakturaOrdre)#fakturabibliotek.fakturaOrdre)
self.db = ordre.db
self.c = self.db.cursor()
self.ordreID = ordre.ID
self.dato = time.mktime(time.localtime())
self.suksess = suksess
self.forklaring = forklaring
if self.navn is not None:
self.settHandling(self.finnHandling(self.navn))
self.registrerHandling()
class opprettet(historiskHandling):
navn = 'opprettet'
class forfalt(historiskHandling):
navn = 'forfalt'
class markertForfalt(historiskHandling):
navn = 'markertForfalt'
class purret(historiskHandling):
navn = 'purret'
class betalt(historiskHandling):
navn = 'betalt'
class avbetalt(historiskHandling):
navn = 'avBetalt'
class kansellert(historiskHandling):
navn = 'kansellert'
class avKansellert(historiskHandling):
navn = 'avKansellert'
class sendtTilInkasso(historiskHandling):
navn = 'sendtTilInkasso'
class utskrift(historiskHandling):
navn = 'utskrift'
class epostSendt(historiskHandling):
navn = 'epostSendt'
class epostSendtSmtp(historiskHandling):
navn = 'epostSendtSmtp'
class epostSendtGmail(historiskHandling):
navn = 'epostSendtGmail'
class epostSendtSendmail(historiskHandling):
navn = 'epostSendtSendmail'
class pdfEpost(historiskHandling):
navn = 'pdfEpost'
class pdfPapir(historiskHandling):
navn = 'pdfPapir'
class pdfSikkerhetskopi(historiskHandling):
navn = 'pdfSikkerhetskopi'
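# A minimal usage sketch (hypothetical, assuming an existing fakturaOrdre
# instance `ordre`); instantiating a subclass records the event immediately:
#
#     betalt(ordre, suksess=True, forklaring='registered manually')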
| kkoksvik/finfaktura | finfaktura/historikk.py | Python | gpl-2.0 | 3,291 |
import sys
res = """hmqskld
mqsd
lgfmlq
1234"""
print "Content-type:text/html"
print "Content-length:%s" %(len(res)+res.count('\n'))
print
sys.stdout.write(res.replace('\r\n','\n'))
| jhjguxin/PyCDC | Karrigell-2.3.5/webapps/cgi-bin/test.py | Python | gpl-3.0 | 185 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import base64
import getpass
import json
import os
import shutil
import socket
import urllib2
import xml.etree.ElementTree as ET
from optparse import OptionParser
PLUGIN_VERSION = '${release}'
DEFAULT_STACK = '${default.stack}'
SUPPORTED_OS_LIST = ['redhat6', 'redhat7']
HAWQ_LIB_STAGING_DIR = '${hawq.lib.staging.dir}'
REPO_VERSION = '${repository.version}'
HAWQ_REPO = '${hawq.repo.prefix}'
HAWQ_ADD_ONS_REPO = '${hawq.addons.repo.prefix}'
REPO_INFO = {
HAWQ_REPO: {
'repoid': '-'.join([HAWQ_REPO, REPO_VERSION]),
'input_param': '--hawqrepo',
'optional': False
},
HAWQ_ADD_ONS_REPO: {
'repoid': '-'.join([HAWQ_ADD_ONS_REPO, REPO_VERSION]),
'input_param': '--addonsrepo',
'optional': True
}
}
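# For example, assuming the Maven filters resolve the prefix to 'HAWQ' and
# REPO_VERSION to '2.0.1.0' (illustrative values only), the HAWQ_REPO entry is:
#     {'repoid': 'HAWQ-2.0.1.0', 'input_param': '--hawqrepo', 'optional': False}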
class APIClient:
"""
Class which interacts with Ambari Server API
"""
# Base API URL points to localhost. This script is to be executed on the Ambari Server
BASE_API_URL = 'http://localhost:8080/api/v1'
def __init__(self, user, password):
self.user = user
self.password = password
self.encoded_credentials = base64.encodestring(self.user + ':' + self.password).replace('\n', '')
def __request(self, method, url_path, headers=None, data=None):
"""
Creates API requests and packages response into the following format: (response code, response body in json object)
"""
headers = headers if headers is not None else {}
headers['Authorization'] = 'Basic {0}'.format(self.encoded_credentials)
req = urllib2.Request(self.BASE_API_URL + url_path, data, headers)
req.get_method = lambda: method
response = urllib2.urlopen(req)
response_str = response.read()
return response.getcode(), json.loads(response_str) if response_str else None
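    # Illustrative call (hypothetical response): self.__request('GET', '/clusters')
    # might return (200, {'items': [{'Clusters': {'cluster_name': 'c1'}}]})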
def verify_api_reachable(self):
"""
Returns true if Ambari Server is reachable through API
"""
try:
status_code, _ = self.__request('GET', '/stacks')
except Exception as e:
if type(e) == urllib2.HTTPError and e.code == 403:
raise Exception('Invalid username and/or password.')
elif type(e) == urllib2.URLError:
raise Exception('Ambari-server is not running. Please start ambari-server.')
else:
raise Exception('Unable to connect to Ambari Server.\n' + str(e))
def get_cluster_name(self):
"""
Returns the name of the installed cluster
"""
_, response_json = self.__request('GET', '/clusters')
return None if len(response_json['items']) == 0 else response_json['items'][0]['Clusters']['cluster_name']
def get_stack_info(self, cluster_name):
"""
Returns stack information (stack name, stack version, repository version) of stack installed on cluster
"""
_, response_json = self.__request('GET',
'/clusters/{0}/stack_versions?ClusterStackVersions/state.matches(CURRENT)'.format(
cluster_name))
if 'items' not in response_json or len(response_json['items']) == 0:
raise Exception('No Stack found to be installed on the cluster {0}'.format(cluster_name))
stack_versions = response_json['items'][0]['ClusterStackVersions']
return stack_versions['stack'], stack_versions['version'], stack_versions['repository_version']
def get_existing_repository_info(self, stack_name, stack_version, repository_version):
"""
Returns existing repo information for a given stack
"""
url_path = '/stacks/{0}/versions/{1}/compatible_repository_versions/{2}?fields=*,operating_systems/*,operating_systems/repositories/*'.format(
stack_name,
stack_version,
repository_version)
_, response_json = self.__request('GET', url_path)
return response_json
def update_existing_repo(self, stack_name, stack_version, repository_version, merged_repo_info):
"""
Sends a PUT request to add new repo information to the Ambari database
"""
url_path = '/stacks/{0}/versions/{1}/repository_versions/{2}'.format(stack_name, stack_version,
repository_version)
headers = {}
headers['X-Requested-By'] = 'ambari'
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
try:
status_code, _ = self.__request('PUT', url_path, headers, merged_repo_info)
except:
# Ambari returns sporadic errors even if PUT succeeds
# Ignore any exception, because existing information from cluster will be verified after PUT request
return
class RepoUtils:
"""
Utility class for handling json structure to add new repo to existing repo
"""
def __transform_repo(self, repository):
"""
Extracts and returns the base_url, repo_id and repo_name for each repository
"""
repo_info_json = repository['Repositories']
result = {}
result['Repositories'] = dict(
(k, v) for k, v in repo_info_json.iteritems() if k in ('base_url', 'repo_id', 'repo_name'))
return result
def __transform_os_repos(self, os_repos):
"""
Constructs the json string for each operating system
"""
result = {
'OperatingSystems': {},
'repositories': []
}
result['OperatingSystems']['os_type'] = os_repos['OperatingSystems']['os_type']
result['repositories'] = [self.__transform_repo(repository) for repository in os_repos['repositories']]
return result
def __transform(self, repository_info):
"""
Constructs the json string with required repository information
"""
result = {
'operating_systems': []
}
result['operating_systems'] = [self.__transform_os_repos(os_repos) for os_repos in
repository_info['operating_systems']]
return result
def __create_repo_info_dict(self, repo):
"""
Creates json string with new repo information
"""
result = {}
result['Repositories'] = {
'base_url': repo['baseurl'],
'repo_id': repo['repoid'],
'repo_name': repo['reponame']
}
return result
def verify_repos_updated(self, existing_repo_info, repos_to_add):
"""
Checks if input repo exists for that os_type on the cluster
"""
existing_repos = self.__transform(existing_repo_info)
all_repos_updated = True
for os_repos in existing_repos['operating_systems']:
if os_repos['OperatingSystems']['os_type'] in SUPPORTED_OS_LIST:
for repo_to_add in repos_to_add:
repo_exists = False
for existing_repo in os_repos['repositories']:
if existing_repo['Repositories']['repo_id'] == repo_to_add['repoid'] and \
existing_repo['Repositories']['repo_name'] == repo_to_add['reponame'] and \
url_exists(existing_repo['Repositories']['base_url'], repo_to_add['baseurl']):
repo_exists = True
all_repos_updated = all_repos_updated and repo_exists
return all_repos_updated
def add_to_existing_repos(self, existing_repo_info, repos_to_add):
"""
Helper function for adding new repos to existing repos
"""
existing_repos = self.__transform(existing_repo_info)
for os_repos in existing_repos['operating_systems']:
if os_repos['OperatingSystems']['os_type'] in SUPPORTED_OS_LIST:
for repo_to_add in repos_to_add:
repo_exists = False
for existing_repo in os_repos['repositories']:
if existing_repo['Repositories']['repo_id'] == repo_to_add['repoid']:
repo_exists = True
existing_repo['Repositories']['repo_name'] = repo_to_add['reponame']
existing_repo['Repositories']['base_url'] = repo_to_add['baseurl']
if not repo_exists:
os_repos['repositories'].append(self.__create_repo_info_dict(repo_to_add))
return json.dumps(existing_repos)
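# Each element of repos_to_add is expected to match the dict produced by
# InputValidator.verify_repo, e.g. (illustrative values only):
#     {'repoid': 'HAWQ-2.0.1.0', 'reponame': 'HAWQ-2.0.1.0',
#      'baseurl': 'http://my.host.address/hawq-2.0.1.0/'}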
class InputValidator:
"""
Class containing methods for validating command line inputs
"""
def __is_repourl_valid(self, repo_url):
"""
Returns True if repo_url points to a valid repository
"""
repo_url = os.path.join(repo_url, 'repodata/repomd.xml')
req = urllib2.Request(repo_url)
try:
response = urllib2.urlopen(req)
except urllib2.URLError:
return False
if response.getcode() != 200:
return False
return True
def verify_stack(self, stack):
"""
Returns stack info of stack
"""
if not stack:
# Use default stack
print 'INFO: Using default stack {0}, since --stack parameter was not specified.'.format(DEFAULT_STACK)
stack = DEFAULT_STACK
stack_pair = stack.split('-')
if len(stack_pair) != 2:
raise Exception('Specified stack {0} is not of expected format STACK_NAME-STACK_VERSION'.format(stack))
stack_name = stack_pair[0]
stack_version = stack_pair[1]
stack_dir = '/var/lib/ambari-server/resources/stacks/{0}/{1}'.format(stack_name, stack_version)
if not os.path.isdir(stack_dir):
raise Exception(
'Specified stack {0} does not exist under /var/lib/ambari-server/resources/stacks'.format(stack))
return {
'stack_name': stack_name,
'stack_version': stack_version,
'stack_dir': stack_dir
}
def verify_repo(self, repoid_prefix, repo_url):
"""
Returns repo info of repo
"""
repo_specified = True
if not repo_url:
# Use default repo_url
repo_url = 'http://{0}/{1}'.format(socket.getfqdn(), REPO_INFO[repoid_prefix]['repoid'])
repo_specified = False
if not self.__is_repourl_valid(repo_url):
if repo_specified:
raise Exception('Specified URL {0} is not a valid repository. \n'
'Please specify a valid url for {1}'.format(repo_url,
REPO_INFO[repoid_prefix]['input_param']))
elif REPO_INFO[repoid_prefix]['optional']:
return None
else:
raise Exception(
'Repository URL {0} is not valid. \nPlease ensure setup_repo.sh has been run for the {1} repository on this machine '
'OR specify a valid url for {2}'.format(repo_url, REPO_INFO[repoid_prefix]['repoid'],
REPO_INFO[repoid_prefix]['input_param']))
return {
'repoid': REPO_INFO[repoid_prefix]['repoid'],
'reponame': REPO_INFO[repoid_prefix]['repoid'],
'baseurl': repo_url
}
def url_exists(repoA, repoB):
"""
Returns True if given repourl repoA exists in repoB
"""
if type(repoB) in (list, tuple):
return repoA.rstrip('/') in [existing_url.rstrip('/') for existing_url in repoB]
else:
return repoA.rstrip('/') == repoB.rstrip('/')
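# Illustrative behaviour: trailing slashes are ignored, so
# url_exists('http://host/repo/', 'http://host/repo') is True, and the second
# argument may also be a list of candidate URLs.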
def update_repoinfo(stack_dir, repos_to_add):
"""
Updates the repoinfo.xml under the specified stack_dir
"""
file_path = '{0}/repos/repoinfo.xml'.format(stack_dir)
for repo in repos_to_add:
repo['xmltext'] = '<repo>\n' \
' <repoid>{0}</repoid>\n' \
' <reponame>{1}</reponame>\n' \
' <baseurl>{2}</baseurl>\n' \
'</repo>\n'.format(repo['repoid'], repo['reponame'], repo['baseurl'])
tree = ET.parse(file_path)
root = tree.getroot()
file_needs_update = False
for os_tag in root.findall('.//os'):
if os_tag.attrib['family'] in SUPPORTED_OS_LIST:
for repo_to_add in repos_to_add:
repo_needs_update = False
for existing_repo in os_tag.findall('.//repo'):
existing_repoid = [repoid.text for repoid in existing_repo.findall('.//repoid')][0]
existing_reponame = [repoid.text for repoid in existing_repo.findall('.//reponame')][0]
existing_baseurl = [baseurl.text for baseurl in existing_repo.findall('.//baseurl')][0]
if existing_repoid == repo_to_add['repoid']:
repo_needs_update = True
print 'INFO: Repository {0} already exists with reponame {1}, baseurl {2} in {3}'.format(
repo_to_add['repoid'], existing_reponame, existing_baseurl, file_path)
if existing_reponame != repo_to_add['reponame'] or existing_baseurl != repo_to_add['baseurl']:
os_tag.remove(existing_repo)
os_tag.append(ET.fromstring(repo_to_add['xmltext']))
print 'INFO: Repository {0} updated with reponame {1}, baseurl {2} in {3}'.format(
repo_to_add['repoid'], repo_to_add['reponame'], repo_to_add['baseurl'], file_path)
file_needs_update = True
if not repo_needs_update:
os_tag.append(ET.fromstring(repo_to_add['xmltext']))
print 'INFO: Repository {0} with baseurl {1} added to {2}'.format(repo_to_add['repoid'],
repo_to_add['baseurl'], file_path)
file_needs_update = True
if file_needs_update:
tree.write(file_path)
def add_repo_to_cluster(api_client, stack, repos_to_add):
"""
Adds the new repository to the existing cluster if the specified stack has been installed on that cluster
"""
stack_name = stack['stack_name']
stack_version = stack['stack_version']
cluster_name = api_client.get_cluster_name()
# Proceed only if cluster is installed
if cluster_name is None:
return
repo_utils = RepoUtils()
installed_stack_name, installed_stack_version, installed_repository_version = api_client.get_stack_info(
cluster_name)
# Proceed only if installed stack matches input stack
if stack_name != installed_stack_name or stack_version != installed_stack_version:
return
existing_repo_info = api_client.get_existing_repository_info(stack_name, stack_version,
installed_repository_version)
new_repo_info = repo_utils.add_to_existing_repos(existing_repo_info, repos_to_add)
api_client.update_existing_repo(stack_name, stack_version, installed_repository_version, new_repo_info)
if not repo_utils.verify_repos_updated(
api_client.get_existing_repository_info(stack_name, stack_version, installed_repository_version),
repos_to_add):
raise Exception(
'Failed to update repository information on existing cluster, {0} with stack {1}-{2}'.format(cluster_name,
stack_name,
stack_version))
print 'INFO: Repositories are available on existing cluster, {0} with stack {1}-{2}'.format(cluster_name,
stack_name,
stack_version)
def write_service_info(stack_dir):
"""
Writes the service info content to the specified stack_dir
"""
stack_services = os.path.join(stack_dir, 'services')
for service in ('HAWQ', 'PXF'):
source_directory = os.path.join(HAWQ_LIB_STAGING_DIR, service)
destination_directory = os.path.join(stack_services, service)
if not os.path.exists(source_directory):
raise Exception('{0} directory was not found under {1}'.format(service, HAWQ_LIB_STAGING_DIR))
service_exists = False
if os.path.exists(destination_directory):
service_exists = True
shutil.rmtree(destination_directory)
if service_exists:
print 'INFO: Updating service {0}, which already exists under {1}'.format(service, stack_services)
shutil.copytree(source_directory, destination_directory)
print 'INFO: {0} directory was successfully {1}d under directory {2}'.format(service,
'update' if service_exists else 'create',
stack_services)
def build_parser():
"""
Builds the parser required for parsing user inputs from command line
"""
usage_string = 'Usage: ./add-hawq.py --user admin --password admin --stack HDP-2.4 --hawqrepo http://my.host.address/hawq-2.0.1.0/ --addonsrepo http://my.host.address/hawq-add-ons-2.0.1.0/'
parser = OptionParser(usage=usage_string, version='%prog {0}'.format(PLUGIN_VERSION))
parser.add_option('-u', '--user', dest='user', help='Ambari login username (Required)')
parser.add_option('-p', '--password', dest='password',
help='Ambari login password. Providing password through command line is not recommended.\n'
'The script prompts for the password.')
parser.add_option('-s', '--stack', dest='stack', help='Stack Name and Version to be added.'
'(Eg: HDP-2.4 or HDP-2.5)')
parser.add_option('-r', '--hawqrepo', dest='hawqrepo', help='Repository URL which points to the HAWQ packages')
parser.add_option('-a', '--addonsrepo', dest='addonsrepo',
help='Repository URL which points to the HAWQ Add Ons packages')
return parser
def main():
parser = build_parser()
options, _ = parser.parse_args()
user = options.user if options.user else raw_input('Enter Ambari login Username: ')
password = options.password if options.password else getpass.getpass('Enter Ambari login Password: ')
try:
# Verify if Ambari credentials are correct and API is reachable
api_client = APIClient(user, password)
api_client.verify_api_reachable()
validator = InputValidator()
stack_info = validator.verify_stack(options.stack)
repos_to_add = [validator.verify_repo(HAWQ_REPO, options.hawqrepo)]
add_ons_repo = validator.verify_repo(HAWQ_ADD_ONS_REPO, options.addonsrepo)
if add_ons_repo is not None:
repos_to_add.append(add_ons_repo)
update_repoinfo(stack_info['stack_dir'], repos_to_add)
add_repo_to_cluster(api_client, stack_info, repos_to_add)
write_service_info(stack_info['stack_dir'])
print '\nINFO: Please restart ambari-server for changes to take effect'
except Exception as e:
print '\nERROR: {0}'.format(str(e))
if __name__ == '__main__':
main()
| lavjain/incubator-hawq | contrib/hawq-ambari-plugin/src/main/resources/utils/add-hawq.py | Python | apache-2.0 | 18,999 |
import numpy as np
from screening_rules import AbstractScreeningRule
class Sasvi(AbstractScreeningRule):
""" Screening by Sasvi rule.
Liu et al (2014)
"""
debug = False
def __init__(self, tol=1e-9, debug=False):
AbstractScreeningRule.__init__(self, 'Sasvi', tol=tol)
self.debug = debug
def screen(self, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
theta = (y - X[nz,:].T.dot(beta[nz])) / l0
a = y/l0 - theta
a_norm = np.linalg.norm(a)
a_norm2 = a_norm**2
is_zero = ((a_norm/float(y.size)) < self.tol or self.isFirstIter(l0, lmax))
b = a + (y/l - y/l0)
b_norm = np.linalg.norm(b)
Xtb = X.dot(b)
XtO = X.dot(theta)
if not is_zero:
diff = (1.0/l - 1.0/l0) / 2.0
Xta = X.dot(a)
atb_normed = a.dot(b)/(a_norm*b_norm)
yT = y - a/a_norm2 * a.dot(y)
yT_norm = np.linalg.norm(yT)
XT = X - Xta[:, np.newaxis].dot(a.reshape(1,a.size)/a_norm2) # feats x exms
XT_norm = np.linalg.norm(XT, axis=1)
XTtyT = XT.dot(yT)
up = XtO + diff*(XT_norm*yT_norm + XTtyT)
um = -XtO + diff*(XT_norm*yT_norm - XTtyT)
inds2 = np.where((Xta>+self.tol) & (atb_normed<=Xta/(normX*a_norm)))[0]
inds3 = np.where((Xta<-self.tol) & (atb_normed<=-Xta/(normX*a_norm)))[0]
um[inds2] = -XtO[inds2] + 0.5*(normX[inds2]*b_norm - Xtb[inds2])
up[inds3] = XtO[inds3] + 0.5*(normX[inds3]*b_norm + Xtb[inds3])
else:
up = XtO + 0.5*(normX*b_norm + Xtb)
um = -XtO + 0.5*(normX*b_norm - Xtb)
S = np.max((um, up), axis=0)
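        # keep exactly those features whose bound can still reach the active
        # threshold 1; the rest are provably inactive at lambda=l and may be
        # screened out safely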
inds = np.where(S >= 1.0 - self.tol)[0]
# debug: compare with very slow but straightforward implementation
if self.debug:
(dbg_inds, dbg_interval) = self.screen_dbg(l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
            # element-wise comparison would raise if the index arrays differ
            # in size, so check the sizes first
            if inds.size != dbg_inds.size or np.any(inds != dbg_inds):
                print('Error: Implementations do not coincide.')
            else:
                print('You\'re fine..')
return (inds, intervals)
def screen_dbg(self, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
theta = (y - X[nz,:].T.dot(beta[nz])) / l0
a = y/l0 - theta
an = np.linalg.norm(a)
an2 = an*an
is_zero = ((an/float(y.size)) < self.tol or self.isFirstIter(l0, lmax))
b = a + (y/l - y/l0)
bn = np.linalg.norm(b)
diff = (1.0/l - 1.0/l0) / 2.0
Xta = X.dot(a)
if not is_zero:
adotb = a.dot(b)/(an*bn)
yT = y - a/an2 * a.dot(y)
normyT = np.linalg.norm(yT)
inds = []
for j in range(X.shape[0]):
xj = X[j,:]
up = 10.0
um = 10.0
# 1)
if not is_zero and adotb>np.abs(Xta[j])/(normX[j]*an):
xjT = xj - a/an2 * Xta[j]
normxjT = np.linalg.norm(xjT)
up = theta.dot(xj) + diff*(normxjT*normyT + xjT.dot(yT))
um = -theta.dot(xj) + diff*(normxjT*normyT - xjT.dot(yT))
# 2)
if not is_zero and Xta[j]>+self.tol and adotb<=Xta[j]/(normX[j]*an):
xjT = xj - a/an2 * Xta[j]
normxjT = np.linalg.norm(xjT)
up = theta.dot(xj) + diff*(normxjT*normyT + xjT.dot(yT))
um = -theta.dot(xj) + 0.5*(normX[j]*bn - xj.dot(b))
# 3)
if not is_zero and Xta[j]<-self.tol and adotb<=-Xta[j]/(normX[j]*an):
up = theta.dot(xj) + 0.5*(normX[j]*bn + xj.dot(b))
xjT = xj - a/an2 * Xta[j]
normxjT = np.linalg.norm(xjT)
um = -theta.dot(xj) + diff*(normxjT*normyT - xjT.dot(yT))
# 4)
if is_zero:
up = theta.dot(xj) + 0.5*(normX[j]*bn + xj.dot(b))
um = -theta.dot(xj) + 0.5*(normX[j]*bn - xj.dot(b))
if um>=1.0-self.tol or up>=1.0-self.tol:
inds.append(j)
#print inds
inds = np.array(inds).astype(int)
return (inds, intervals)
def get_sphere(self, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
E = (y - X[nz,:].T.dot(beta[nz])) / l0
C = y/l
o = 0.5*(E-C)+C
rho = 0.5*np.linalg.norm(E-C)
#rho = 0.5 * np.linalg.norm(X[nz,:].T.dot(beta[nz])/l0 + (y/l-y/l0) )
#rho = np.sqrt(o.dot(o)-E.dot(C))
return (o, rho)
def get_local_halfspaces(self, o, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
if not self.isFirstIter(l0, lmax):
theta = (y - X[nz,:].T.dot(beta[nz])) / l0
ak = -(theta-y/l0)
normak = np.linalg.norm(ak)
bk = np.array([ak.dot(theta)])
ak = ak.reshape(1, y.size)
else:
ak = np.array([])
bk = np.array([])
normak = np.array([])
return (ak, bk, normak)
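# A minimal usage sketch (hypothetical shapes, for illustration only):
#
#     rule = Sasvi(tol=1e-9)
#     inds, intervals = rule.screen(l, l0, lmax, lmax_x, beta, X, y,
#                                   normX, normy, nz, intervals)
#     # `inds` holds the indices of features that survive screening at lambda=l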
| nicococo/AdaScreen | adascreen/sasvi.py | Python | mit | 5,180 |