repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
xiangel/hue | refs/heads/master | desktop/libs/hadoop/src/hadoop/fs/test_webhdfs.py | 16 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import sys
import threading
import unittest
from nose.plugins.skip import SkipTest
from nose.tools import assert_false, assert_true, assert_equals, assert_raises, assert_not_equals
from hadoop import pseudo_hdfs4
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.hadoopfs import Hdfs
from hadoop.pseudo_hdfs4 import is_live_cluster
LOG = logging.getLogger(__name__)
class WebhdfsTests(unittest.TestCase):
requires_hadoop = True
@classmethod
def setUpClass(cls):
cls.cluster = pseudo_hdfs4.shared_cluster()
cls.prefix = cls.cluster.fs_prefix + '/WebhdfsTests'
cls.cluster.fs.setuser('test')
cls.cluster.fs.mkdir(cls.prefix)
cls.cluster.fs.chmod(cls.prefix, 01777)
def setUp(self):
self.cluster.fs.setuser('test')
def test_webhdfs(self):
"""
Minimal tests for a few basic file system operations.
"""
fs = self.cluster.fs
test_file = self.prefix + "/fortest.txt"
f = fs.open(test_file, "w")
try:
f.write("hello")
f.close()
assert_equals("hello", fs.open(test_file).read())
assert_equals(5, fs.stats(test_file)["size"])
assert_true(fs.isfile(test_file))
assert_false(fs.isfile("/"))
assert_true(fs.isdir("/"))
assert_false(fs.isdir(test_file))
finally:
fs.remove(test_file)
def test_webhdfs_functions(self):
"""
Tests advanced file system operations.
"""
fs = self.cluster.fs
# Create home dir
fs.create_home_dir("/user/test_webhdfs")
assert_true(fs.isdir("/user/test_webhdfs"))
fs.do_as_superuser(fs.rmtree, "/user/test_webhdfs")
def test_seek(self):
"""Test for DESKTOP-293 - ensure seek works in python2.4"""
fs = self.cluster.fs
test_file = self.prefix + "/fortest.txt"
f = fs.open(test_file, "w")
try:
f.write("hello")
f.close()
f = fs.open(test_file, "r")
f.seek(0, os.SEEK_SET)
assert_equals("he", f.read(2))
f.seek(1, os.SEEK_SET)
assert_equals("el", f.read(2))
f.seek(-1, os.SEEK_END)
assert_equals("o", f.read())
f.seek(0, os.SEEK_SET)
f.seek(2, os.SEEK_CUR)
assert_equals("ll", f.read(2))
finally:
fs.remove(test_file)
def test_seek_across_blocks(self):
"""Makes a file with a lot of blocks, seeks around"""
if is_live_cluster():
raise SkipTest('HUE-2946: Skipping because requires more memory')
fs = self.cluster.fs
test_file = self.prefix + "/fortest-blocks.txt"
fs.create(test_file, replication=1, blocksize=1024**2)
f = fs.open(test_file, "w")
try:
data = "abcdefghijklmnopqrstuvwxyz" * 30 * 1024**2
f.write(data)
f.close()
for i in xrange(1, 10):
f = fs.open(test_file, "r")
for j in xrange(1, 100):
offset = random.randint(0, len(data) - 1)
f.seek(offset, os.SEEK_SET)
assert_equals(data[offset:offset+50], f.read(50))
f.close()
finally:
fs.remove(test_file)
def test_exceptions(self):
"""
Tests that appropriate exceptions are raised.
"""
fs = self.cluster.fs
test_file = self.prefix + "/for_exception_test.txt"
f = fs.open(test_file, "w")
f.write("foo")
f.close()
fs.chmod(test_file, 0400)
fs.setuser("notsuperuser")
f = fs.open(test_file)
assert_raises(WebHdfsException, f.read)
def test_umask(self):
fs = self.cluster.fs
prefix = self.prefix + '/test_umask'
fs_umask = fs._umask
fs._umask = 01022
try:
test_dir = prefix + '/umask_test_dir'
fs.mkdir(test_dir)
test_file = prefix + '/umask_test.txt'
f = fs.open(test_file, "w")
f.write("foo")
f.close()
# Check currrent permissions are 777 (666 for file)
assert_equals('40755', '%o' % fs.stats(test_dir).mode)
assert_equals('100644', '%o' % fs.stats(test_file).mode)
finally:
fs._umask = fs_umask
fs_umask = fs._umask
fs._umask = 0077
prefix += '/2'
try:
test_dir = prefix + '/umask_test_dir'
fs.mkdir(test_dir)
test_file = prefix + '/umask_test.txt'
fs.create(test_file)
# Check currrent permissions are not 777 (666 for file)
assert_equals('41700', '%o' % fs.stats(test_dir).mode)
assert_equals('100600', '%o' % fs.stats(test_file).mode)
finally:
fs._umask = fs_umask
def test_umask_overriden(self):
fs = self.cluster.fs
prefix = self.prefix + '/test_umask_overriden'
fs_umask = fs._umask
fs._umask = 01022
try:
test_dir = prefix + '/umask_test_dir'
fs.mkdir(test_dir, 0333)
test_file = prefix + '/umask_test.txt'
fs.create(test_file, permission=0333)
assert_equals('40333', '%o' % fs.stats(test_dir).mode)
assert_equals('100333', '%o' % fs.stats(test_file).mode)
finally:
fs._umask = fs_umask
def test_umask_without_sticky(self):
fs = self.cluster.fs
prefix = self.prefix + '/test_umask_without_sticky'
fs_umask = fs._umask
fs._umask = 022
try:
test_dir = prefix + '/umask_test_dir'
fs.mkdir(test_dir)
test_file = prefix + '/umask_test.txt'
fs.create(test_file)
assert_equals('41755', '%o' % fs.stats(test_dir).mode)
assert_equals('100644', '%o' % fs.stats(test_file).mode)
finally:
fs._umask = fs_umask
def test_copy_remote_dir(self):
fs = self.cluster.fs
src_dir = self.prefix + '/copy_remote_dir'
fs.mkdir(src_dir)
f1 = fs.open(src_dir + "/test_one.txt", "w")
f1.write("foo")
f1.close()
f2 = fs.open(src_dir + "/test_two.txt", "w")
f2.write("bar")
f2.close()
new_owner = 'testcopy'
new_owner_dir = self.prefix + '/' + new_owner + '/test-copy'
fs.copy_remote_dir(src_dir, new_owner_dir, dir_mode=0755, owner=new_owner)
dir_stat = fs.stats(new_owner_dir)
assert_equals(new_owner, dir_stat.user)
# assert_equals(new_owner, dir_stat.group) We inherit supergroup now
assert_equals('40755', '%o' % dir_stat.mode)
src_stat = fs.listdir_stats(src_dir)
dest_stat = fs.listdir_stats(new_owner_dir)
src_names = set([stat.name for stat in src_stat])
dest_names = set([stat.name for stat in dest_stat])
assert_true(src_names)
assert_equals(src_names, dest_names)
for stat in dest_stat:
assert_equals('testcopy', stat.user)
# assert_equals('testcopy', stat.group) We inherit supergroup now
assert_equals('100644', '%o' % stat.mode)
def test_two_files_open(self):
"""
See DESKTOP-510. There was a bug where you couldn't open two files at
the same time. It boils down to a close_fds=True issue. If this doesn't
hang, all is good.
"""
fs = self.cluster.fs
f1 = fs.open(self.prefix + "/test_one.txt", "w")
f2 = fs.open(self.prefix + "/test_two.txt", "w")
f1.write("foo")
f2.write("bar")
f1.close()
f2.close()
# This should work, not hang, etc.
def test_urlsplit(self):
"""Test Hdfs urlsplit"""
url = 'hdfs://nn.no.port/foo/bar'
assert_equals(('hdfs', 'nn.no.port', '/foo/bar', '', ''), Hdfs.urlsplit(url))
url = 'hdfs://nn:8020/foo/bar'
assert_equals(('hdfs', 'nn:8020', '/foo/bar', '', ''), Hdfs.urlsplit(url))
url = 'hdfs://nn:8020//foo//bar'
assert_equals(('hdfs', 'nn:8020', '/foo/bar', '', ''), Hdfs.urlsplit(url))
url = 'hdfs://nn:8020'
assert_equals(('hdfs', 'nn:8020', '/', '', ''), Hdfs.urlsplit(url))
url = '/foo/bar'
assert_equals(('hdfs', '', '/foo/bar', '', ''), Hdfs.urlsplit(url))
url = 'foo//bar'
assert_equals(('hdfs', '', 'foo/bar', '', ''), Hdfs.urlsplit(url))
def test_i18n_namespace(self):
# Use utf-8 encoding
reload(sys)
sys.setdefaultencoding('utf-8')
def check_existence(name, parent, present=True):
assertion = present and assert_true or assert_false
listing = self.cluster.fs.listdir(parent)
assertion(name in listing, "%s should be in %s" % (name, listing))
name = u'''pt-Olá_ch-你好_ko-안녕_ru-Здравствуйте%20,.<>~`!@#$%^&()_-+='"'''
prefix = self.prefix + '/tmp/i18n'
dir_path = '%s/%s' % (prefix, name)
file_path = '%s/%s' % (dir_path, name)
try:
# Create a directory
self.cluster.fs.mkdir(dir_path)
# Directory is there
check_existence(name, prefix)
# Create a file (same name) in the directory
self.cluster.fs.open(file_path, 'w').close()
# File is there
check_existence(name, dir_path)
# Test rename
new_file_path = file_path + '.new'
self.cluster.fs.rename(file_path, new_file_path)
# New file is there
check_existence(name + '.new', dir_path)
# Test remove
self.cluster.fs.remove(new_file_path)
check_existence(name + '.new', dir_path, present=False)
# Test rmtree
self.cluster.fs.rmtree(dir_path)
check_existence(name, prefix, present=False)
# Test exception can handle non-ascii characters
try:
self.cluster.fs.rmtree(dir_path)
except IOError, ex:
LOG.info('Successfully caught error: %s' % ex)
finally:
try:
self.cluster.fs.rmtree(prefix)
except Exception, ex:
LOG.error('Failed to cleanup %s: %s' % (prefix, ex))
# Reset encoding
reload(sys)
sys.setdefaultencoding('ascii')
def test_chmod(self):
# Create a test directory with
# a subdirectory and a few files.
dir1 = self.prefix + '/test_chmod'
subdir1 = dir1 + '/test1'
file1 = subdir1 + '/test1.txt'
fs = self.cluster.fs
try:
fs.setuser(fs.superuser)
fs.mkdir(subdir1)
f = fs.open(file1, "w")
f.write("hello")
f.close()
# Check currrent permissions are not 777 (666 for file)
fs.chmod(dir1, 01000, recursive=True)
assert_equals(041000, fs.stats(dir1).mode)
assert_equals(041000, fs.stats(subdir1).mode)
assert_equals(0101000, fs.stats(file1).mode)
# Chmod non-recursive
fs.chmod(dir1, 01222, recursive=False)
assert_equals(041222, fs.stats(dir1).mode)
assert_equals(041000, fs.stats(subdir1).mode)
assert_equals(0101000, fs.stats(file1).mode)
# Chmod recursive
fs.chmod(dir1, 01444, recursive=True)
assert_equals(041444, fs.stats(dir1).mode)
assert_equals(041444, fs.stats(subdir1).mode)
assert_equals(0101444, fs.stats(file1).mode)
finally:
fs.rmtree(dir1, skip_trash=True)
fs.setuser('test')
def test_chown(self):
# Create a test directory with
# a subdirectory and a few files.
dir1 = self.prefix + '/test_chown'
subdir1 = dir1 + '/test1'
file1 = subdir1 + '/test1.txt'
fs = self.cluster.fs
try:
fs.setuser(fs.superuser)
fs.mkdir(subdir1)
f = fs.open(file1, "w")
f.write("hello")
f.close()
# Check currrent owners are not user test
LOG.info(str(fs.stats(dir1).__dict__))
assert_not_equals('test', fs.stats(dir1).user)
assert_not_equals('test', fs.stats(subdir1).user)
assert_not_equals('test', fs.stats(file1).user)
# Chown non-recursive
fs.chown(dir1, 'test', recursive=False)
assert_equals('test', fs.stats(dir1).user)
assert_not_equals('test', fs.stats(subdir1).user)
assert_not_equals('test', fs.stats(file1).user)
# Chown recursive
fs.chown(dir1, 'test', recursive=True)
assert_equals('test', fs.stats(dir1).user)
assert_equals('test', fs.stats(subdir1).user)
assert_equals('test', fs.stats(file1).user)
finally:
fs.rmtree(dir1, skip_trash=True)
fs.setuser('test')
def test_trash_and_restore(self):
PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
try:
# Trash
self.cluster.fs.open(PATH, 'w').close()
assert_true(self.cluster.fs.exists(PATH))
self.cluster.fs.remove(PATH)
assert_false(self.cluster.fs.exists(PATH))
assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path)
trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path, trash_dir, PATH[1:]) for trash_dir in trash_dirs]
exists = map(self.cluster.fs.exists, trash_paths)
assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
# Restore
self.cluster.fs.restore(trash_path)
assert_false(self.cluster.fs.exists(trash_path))
assert_true(self.cluster.fs.exists(PATH))
finally:
try:
self.cluster.fs.rmtree(PATH)
except Exception, ex:
LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
def test_trash_and_purge(self):
PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
try:
# Trash
self.cluster.fs.open(PATH, 'w').close()
assert_true(self.cluster.fs.exists(PATH))
self.cluster.fs.remove(PATH)
assert_false(self.cluster.fs.exists(PATH))
assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path)
trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path, trash_dir, PATH[1:]) for trash_dir in trash_dirs]
exists = map(self.cluster.fs.exists, trash_paths)
assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
# Purge
self.cluster.fs.purge_trash()
assert_false(self.cluster.fs.exists(trash_path))
assert_false(self.cluster.fs.exists(PATH))
finally:
try:
self.cluster.fs.rmtree(PATH)
except Exception, ex:
LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
def test_restore_error(self):
PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
try:
# Trash
self.cluster.fs.open(PATH, 'w').close()
assert_true(self.cluster.fs.exists(PATH))
self.cluster.fs.remove(PATH)
assert_false(self.cluster.fs.exists(PATH))
assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path)
trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path, trash_dir, PATH[1:]) for trash_dir in trash_dirs]
exists = map(self.cluster.fs.exists, trash_paths)
assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
# Purge
self.cluster.fs.purge_trash()
assert_false(self.cluster.fs.exists(trash_path))
assert_false(self.cluster.fs.exists(PATH))
# Restore fail
assert_raises(IOError, self.cluster.fs.restore, trash_path)
finally:
try:
self.cluster.fs.rmtree(PATH)
except Exception, ex:
LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
def test_trash_permissions(self):
PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
try:
# Trash
self.cluster.fs.open(PATH, 'w').close()
assert_true(self.cluster.fs.exists(PATH))
self.cluster.fs.remove(PATH)
assert_false(self.cluster.fs.exists(PATH))
assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path)
trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path, trash_dir, PATH[1:]) for trash_dir in trash_dirs]
exists = map(self.cluster.fs.exists, trash_paths)
assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
# Restore
assert_raises(IOError, self.cluster.fs.do_as_user, 'nouser', self.cluster.fs.restore, trash_path)
finally:
try:
self.cluster.fs.rmtree(PATH)
except Exception, ex:
LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
def test_trash_users(self):
"""
Imitate eventlet green thread re-use and ensure trash works.
"""
class test_local(object):
def __getattribute__(self, name):
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
return object.__setattr__(self, name, value)
def __delattr__(self, name):
return object.__delattr__(self, name)
threading.local = test_local
USERS = ['test1', 'test2']
CLEANUP = []
try:
for user in USERS:
# Create home directory.
self.cluster.fs.setuser(user)
self.cluster.fs.create_home_dir()
CLEANUP.append(self.cluster.fs.get_home_dir())
# Move to trash for both users.
# If there is a thread local issue, then this will fail.
PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
self.cluster.fs.open(PATH, 'w').close()
assert_true(self.cluster.fs.exists(PATH))
self.cluster.fs.remove(PATH)
assert_false(self.cluster.fs.exists(PATH))
assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
finally:
reload(threading)
self.cluster.fs.setuser(self.cluster.superuser)
for directory in CLEANUP:
try:
self.cluster.fs.rmtree(dir)
except Exception, ex:
LOG.error('Failed to cleanup %s: %s' % (directory, ex))
|
newsteinking/docker | refs/heads/master | pip/vcs/__init__.py | 35 | """Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.exceptions import BadCommand
from pip.utils import (display_path, backup_dir, call_subprocess,
rmtree, ask_path_exists)
__all__ = ['vcs', 'get_src_requirement']
logger = logging.getLogger(__name__)
class VcsSupport(object):
    """Registry of the available version-control backends."""

    _registry = {}
    schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']

    def __init__(self):
        # Register more schemes with urlparse for various version control
        # systems
        urllib_parse.uses_netloc.extend(self.schemes)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment
        if getattr(urllib_parse, 'uses_fragment', None):
            urllib_parse.uses_fragment.extend(self.schemes)
        super(VcsSupport, self).__init__()

    def __iter__(self):
        return iter(self._registry)

    @property
    def backends(self):
        # Registered backend classes, in registration order.
        return list(self._registry.values())

    @property
    def dirnames(self):
        # Control directory names (e.g. '.git') of all registered backends.
        return [backend_cls.dirname for backend_cls in self.backends]

    @property
    def all_schemes(self):
        # Flattened list of every URL scheme any backend supports.
        return [scheme
                for backend_cls in self.backends
                for scheme in backend_cls.schemes]

    def register(self, cls):
        # A backend without a ``name`` attribute cannot be keyed.
        if not hasattr(cls, 'name'):
            logger.warning('Cannot register VCS %s', cls.__name__)
            return
        if cls.name in self._registry:
            return
        self._registry[cls.name] = cls
        logger.debug('Registered VCS backend: %s', cls.name)

    def unregister(self, cls=None, name=None):
        # Accept either the backend name or the class itself.
        key = None
        if name in self._registry:
            key = name
        elif cls in self._registry.values():
            key = cls.name
        if key is None:
            logger.warning('Cannot unregister because no class or name given')
        else:
            del self._registry[key]

    def get_backend_name(self, location):
        """
        Return the name of the version control backend if found at given
        location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
        """
        for candidate in self._registry.values():
            logger.debug('Checking in %s for %s (%s)...',
                         location, candidate.dirname, candidate.name)
            if os.path.exists(os.path.join(location, candidate.dirname)):
                logger.debug('Determine that %s uses VCS: %s',
                             location, candidate.name)
                return candidate.name
        return None

    def get_backend(self, name):
        # Lookup is case-insensitive; returns None when unknown.
        return self._registry.get(name.lower())

    def get_backend_from_location(self, location):
        found = self.get_backend_name(location)
        return self.get_backend(found) if found else None
# Module-level singleton registry; backend modules register themselves here.
vcs = VcsSupport()
class VersionControl(object):
    """Abstract base class for a version-control backend (git, hg, svn, bzr).

    Subclasses set ``name``/``dirname``/``schemes`` and implement the
    NotImplementedError methods below.
    """
    name = ''
    dirname = ''
    # List of supported schemes for this Version Control
    schemes = ()

    def __init__(self, url=None, *args, **kwargs):
        # url: the repository URL (the part after '<vcs>+' in a requirement).
        self.url = url
        super(VersionControl, self).__init__(*args, **kwargs)

    def _filter(self, line):
        # Log-level filter for subprocess output: log every line at DEBUG.
        return (logging.DEBUG, line)

    def _is_local_repository(self, repo):
        """
        posix absolute paths start with os.path.sep,
        win32 ones start with drive (like c:\\folder)
        """
        # NOTE(review): returns the drive string (truthy/falsy) rather than a
        # strict bool on win32 paths -- callers only use it in boolean context.
        drive, tail = os.path.splitdrive(repo)
        return repo.startswith(os.path.sep) or drive

    # See issue #1083 for why this method was introduced:
    # https://github.com/pypa/pip/issues/1083
    def translate_egg_surname(self, surname):
        # For example, Django has branches of the form "stable/1.7.x".
        return surname.replace('/', '_')

    def export(self, location):
        """
        Export the repository at the url to the destination location
        i.e. only download the files, without vcs informations
        """
        raise NotImplementedError

    def get_url_rev(self):
        """
        Returns the correct repository URL and revision by parsing the given
        repository URL
        """
        error_message = (
            "Sorry, '%s' is a malformed VCS url. "
            "The format is <vcs>+<protocol>://<url>, "
            "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
        )
        assert '+' in self.url, error_message % self.url
        url = self.url.split('+', 1)[1]
        scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
        rev = None
        # An '@' in the path marks a pinned revision, e.g. .../repo@v1.2
        if '@' in path:
            path, rev = path.rsplit('@', 1)
        url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
        return url, rev

    def get_info(self, location):
        """
        Returns (url, revision), where both are strings
        """
        assert not location.rstrip('/').endswith(self.dirname), \
            'Bad directory: %s' % location
        return self.get_url(location), self.get_revision(location)

    def normalize_url(self, url):
        """
        Normalize a URL for comparison by unquoting it and removing any
        trailing slash.
        """
        return urllib_parse.unquote(url).rstrip('/')

    def compare_urls(self, url1, url2):
        """
        Compare two repo URLs for identity, ignoring incidental differences.
        """
        return (self.normalize_url(url1) == self.normalize_url(url2))

    def obtain(self, dest):
        """
        Called when installing or updating an editable package, takes the
        source path of the checkout.
        """
        raise NotImplementedError

    def switch(self, dest, url, rev_options):
        """
        Switch the repo at ``dest`` to point to ``URL``.
        """
        raise NotImplementedError

    def update(self, dest, rev_options):
        """
        Update an already-existing repo to the given ``rev_options``.
        """
        raise NotImplementedError

    def check_destination(self, dest, url, rev_options, rev_display):
        """
        Prepare a location to receive a checkout/clone.
        Return True if the location is ready for (and requires) a
        checkout/clone, False otherwise.
        """
        checkout = True
        prompt = False
        if os.path.exists(dest):
            checkout = False
            if os.path.exists(os.path.join(dest, self.dirname)):
                # dest is already a checkout of this VCS type.
                existing_url = self.get_url(dest)
                if self.compare_urls(existing_url, url):
                    # Same repository: just update it in place.
                    logger.debug(
                        '%s in %s exists, and has correct URL (%s)',
                        self.repo_name.title(),
                        display_path(dest),
                        url,
                    )
                    logger.info(
                        'Updating %s %s%s',
                        display_path(dest),
                        self.repo_name,
                        rev_display,
                    )
                    self.update(dest, rev_options)
                else:
                    # Same VCS, different URL: ask the user what to do.
                    logger.warning(
                        '%s %s in %s exists with URL %s',
                        self.name,
                        self.repo_name,
                        display_path(dest),
                        existing_url,
                    )
                    prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
                              ('s', 'i', 'w', 'b'))
            else:
                # dest exists but is not a checkout of this VCS.
                logger.warning(
                    'Directory %s already exists, and is not a %s %s.',
                    dest,
                    self.name,
                    self.repo_name,
                )
                prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
        if prompt:
            logger.warning(
                'The plan is to install the %s repository %s',
                self.name,
                url,
            )
            response = ask_path_exists('What to do? %s' % prompt[0],
                                       prompt[1])
            if response == 's':
                logger.info(
                    'Switching %s %s to %s%s',
                    self.repo_name,
                    display_path(dest),
                    url,
                    rev_display,
                )
                self.switch(dest, url, rev_options)
            elif response == 'i':
                # do nothing
                pass
            elif response == 'w':
                logger.warning('Deleting %s', display_path(dest))
                rmtree(dest)
                checkout = True
            elif response == 'b':
                dest_dir = backup_dir(dest)
                logger.warning(
                    'Backing up %s to %s', display_path(dest), dest_dir,
                )
                shutil.move(dest, dest_dir)
                checkout = True
        return checkout

    def unpack(self, location):
        """
        Clean up current location and download the url repository
        (and vcs infos) into location
        """
        if os.path.exists(location):
            rmtree(location)
        self.obtain(location)

    def get_src_requirement(self, dist, location, find_tags=False):
        """
        Return a string representing the requirement needed to
        redownload the files currently present in location, something
        like:
        {repository_url}@{revision}#egg={project_name}-{version_identifier}
        If find_tags is True, try to find a tag matching the revision
        """
        raise NotImplementedError

    def get_url(self, location):
        """
        Return the url used at location
        Used in get_info or check_destination
        """
        raise NotImplementedError

    def get_revision(self, location):
        """
        Return the current revision of the files at location
        Used in get_info
        """
        raise NotImplementedError

    def run_command(self, cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True,
                    command_level=logging.DEBUG, command_desc=None,
                    extra_environ=None):
        """
        Run a VCS subcommand
        This is simply a wrapper around call_subprocess that adds the VCS
        command name, and checks that the VCS is available
        """
        cmd = [self.name] + cmd
        try:
            return call_subprocess(cmd, show_stdout, filter_stdout, cwd,
                                   raise_on_returncode, command_level,
                                   command_desc, extra_environ)
        except OSError as e:
            # errno.ENOENT = no such file or directory
            # In other words, the VCS executable isn't available
            if e.errno == errno.ENOENT:
                raise BadCommand('Cannot find command %r' % self.name)
            else:
                raise  # re-raise exception if a different error occured
def get_src_requirement(dist, location, find_tags):
    """Build the requirement string for the editable checkout at *location*.

    Falls back to ``dist.as_requirement()`` when no VCS backend matches the
    location or the backend's executable is not on PATH.
    """
    backend_cls = vcs.get_backend_from_location(location)
    if not backend_cls:
        logger.warning(
            'cannot determine version of editable source in %s (is not SVN '
            'checkout, Git clone, Mercurial clone or Bazaar branch)',
            location,
        )
        return dist.as_requirement()
    try:
        return backend_cls().get_src_requirement(dist, location, find_tags)
    except BadCommand:
        logger.warning(
            'cannot determine version of editable source in %s '
            '(%s command not found in path)',
            location,
            backend_cls.name,
        )
        return dist.as_requirement()
|
AccelAI/accel.ai | refs/heads/master | flask-aws/lib/python2.7/site-packages/boto/ec2/blockdevicemapping.py | 12 | # Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class BlockDeviceType(object):
    """
    Represents parameters for a block device.
    """

    def __init__(self,
                 connection=None,
                 ephemeral_name=None,
                 no_device=False,
                 volume_id=None,
                 snapshot_id=None,
                 status=None,
                 attach_time=None,
                 delete_on_termination=False,
                 size=None,
                 volume_type=None,
                 iops=None):
        self.connection = connection
        self.ephemeral_name = ephemeral_name
        self.no_device = no_device
        self.volume_id = volume_id
        self.snapshot_id = snapshot_id
        self.status = status
        self.attach_time = attach_time
        self.delete_on_termination = delete_on_termination
        self.size = size
        self.volume_type = volume_type
        self.iops = iops

    def startElement(self, name, attrs, connection):
        # SAX hook: block-device elements have no nested objects to build.
        pass

    def endElement(self, name, value, connection):
        # SAX hook: map a closing tag onto the matching attribute.
        # All tags are now compared case-insensitively via ``lname``;
        # previously 'volumeId' was the only case-SENSITIVE comparison,
        # inconsistent with every other branch below.
        lname = name.lower()
        if lname == 'volumeid':
            self.volume_id = value
        elif lname == 'virtualname':
            self.ephemeral_name = value
        elif lname == 'nodevice':
            self.no_device = (value == 'true')
        elif lname == 'snapshotid':
            self.snapshot_id = value
        elif lname == 'volumesize':
            self.size = int(value)
        elif lname == 'status':
            self.status = value
        elif lname == 'attachtime':
            self.attach_time = value
        elif lname == 'deleteontermination':
            self.delete_on_termination = (value == 'true')
        elif lname == 'volumetype':
            self.volume_type = value
        elif lname == 'iops':
            self.iops = int(value)
        else:
            # Unknown tags are preserved verbatim as instance attributes.
            setattr(self, name, value)
# for backwards compatibility
# Older code imported EBSBlockDeviceType; it is the same class.
EBSBlockDeviceType = BlockDeviceType
class BlockDeviceMapping(dict):
    """
    Represents a collection of BlockDeviceTypes when creating ec2 instances.

    Example:
    dev_sda1 = BlockDeviceType()
    dev_sda1.size = 100 # change root volume to 100GB instead of default
    bdm = BlockDeviceMapping()
    bdm['/dev/sda1'] = dev_sda1
    reservation = image.run(..., block_device_map=bdm, ...)
    """

    def __init__(self, connection=None):
        """
        :type connection: :class:`boto.ec2.EC2Connection`
        :param connection: Optional connection.
        """
        dict.__init__(self)
        self.connection = connection
        # Parsing state: the device name / BlockDeviceType being assembled.
        self.current_name = None
        self.current_value = None

    def startElement(self, name, attrs, connection):
        # A nested <ebs> or <virtualName> opens a new device record.
        if name.lower() in ('ebs', 'virtualname'):
            self.current_value = BlockDeviceType(self)
            return self.current_value

    def endElement(self, name, value, connection):
        tag = name.lower()
        if tag in ('device', 'devicename'):
            self.current_name = value
        elif tag in ('item', 'member'):
            # A list entry closed: commit the assembled device.
            self[self.current_name] = self.current_value

    def ec2_build_list_params(self, params, prefix=''):
        return self._build_list_params(params,
                                       prefix='%sBlockDeviceMapping' % prefix)

    def autoscale_build_list_params(self, params, prefix=''):
        return self._build_list_params(params,
                                       prefix='%sBlockDeviceMappings.member' % prefix)

    def _build_list_params(self, params, prefix=''):
        # Serialize each mapping entry into flat request parameters.
        for i, dev_name in enumerate(self, 1):
            pre = '%s.%d' % (prefix, i)
            params['%s.DeviceName' % pre] = dev_name
            block_dev = self[dev_name]
            if block_dev.ephemeral_name:
                params['%s.VirtualName' % pre] = block_dev.ephemeral_name
                continue
            if block_dev.no_device:
                params['%s.NoDevice' % pre] = ''
                continue
            if block_dev.snapshot_id:
                params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id
            if block_dev.size:
                params['%s.Ebs.VolumeSize' % pre] = block_dev.size
            if block_dev.delete_on_termination:
                params['%s.Ebs.DeleteOnTermination' % pre] = 'true'
            else:
                params['%s.Ebs.DeleteOnTermination' % pre] = 'false'
            if block_dev.volume_type:
                params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type
            if block_dev.iops is not None:
                params['%s.Ebs.Iops' % pre] = block_dev.iops
|
roselleebarle04/django | refs/heads/master | django/http/multipartparser.py | 332 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
    # Raised for malformed multipart input (bad Content-Type, boundary,
    # or content length) -- see MultiPartParser.__init__.
    pass
class InputStreamExhausted(Exception):
    """
    No more reads are allowed from this device.
    """
    pass
# Part-type markers used by the multipart parsing machinery.
RAW = "raw"
FILE = "file"
FIELD = "field"

# base64 decoding raises TypeError on Python 2 but binascii.Error on Python 3.
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
"""
A rfc2388 multipart/form-data parser.
``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
    """
    Initialize the MultiPartParser object.
    :META:
        The standard ``META`` dictionary in Django request objects.
    :input_data:
        The raw post data, as a file-like object.
    :upload_handlers:
        A list of UploadHandler instances that perform operations on the uploaded
        data.
    :encoding:
        The encoding with which to treat the incoming data.

    :raises MultiPartParserError: if the Content-Type, boundary, or
        Content-Length headers are missing or invalid.
    """
    #
    # Content-Type should contain multipart and the boundary information.
    #
    content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
    if not content_type.startswith('multipart/'):
        raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

    # Parse the header to get the boundary to split the parts.
    ctypes, opts = parse_header(content_type.encode('ascii'))
    boundary = opts.get('boundary')
    if not boundary or not cgi.valid_boundary(boundary):
        raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)

    # Content-Length should contain the length of the body we are about
    # to receive.
    try:
        content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
    except (ValueError, TypeError):
        content_length = 0
    if content_length < 0:
        # This means we shouldn't continue...raise an error.
        raise MultiPartParserError("Invalid content length: %r" % content_length)

    if isinstance(boundary, six.text_type):
        boundary = boundary.encode('ascii')
    self._boundary = boundary
    self._input_data = input_data
    # For compatibility with low-level network APIs (with 32-bit integers),
    # the chunk size should be < 2^31, but still divisible by 4.
    possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
    self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
    self._meta = META
    self._encoding = encoding or settings.DEFAULT_CHARSET
    self._content_length = content_length
    self._upload_handlers = upload_handlers
def parse(self):
    """
    Parse the POST data and break it into a FILES MultiValueDict and a POST
    MultiValueDict.

    Returns a tuple containing the POST and FILES dictionary, respectively.
    """
    # We have to import QueryDict down here to avoid a circular import.
    from django.http import QueryDict

    encoding = self._encoding
    handlers = self._upload_handlers

    # HTTP spec says that Content-Length >= 0 is valid
    # handling content-length == 0 before continuing
    if self._content_length == 0:
        return QueryDict('', encoding=self._encoding), MultiValueDict()

    # See if any of the handlers take care of the parsing.
    # This allows overriding everything if need be.
    for handler in handlers:
        result = handler.handle_raw_input(self._input_data,
                                          self._meta,
                                          self._content_length,
                                          self._boundary,
                                          encoding)
        # Check to see if it was handled
        if result is not None:
            return result[0], result[1]

    # Create the data structures to be used later.
    self._post = QueryDict('', mutable=True)
    self._files = MultiValueDict()

    # Instantiate the parser and stream:
    stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

    # Whether or not to signal a file-completion at the beginning of the loop.
    old_field_name = None
    counters = [0] * len(handlers)

    try:
        for item_type, meta_data, field_stream in Parser(stream, self._boundary):
            if old_field_name:
                # We run this at the beginning of the next loop
                # since we cannot be sure a file is complete until
                # we hit the next boundary/part of the multipart content.
                self.handle_file_complete(old_field_name, counters)
                old_field_name = None

            try:
                disposition = meta_data['content-disposition'][1]
                field_name = disposition['name'].strip()
            except (KeyError, IndexError, AttributeError):
                # Parts without a usable content-disposition name are skipped.
                continue

            transfer_encoding = meta_data.get('content-transfer-encoding')
            if transfer_encoding is not None:
                transfer_encoding = transfer_encoding[0].strip()
            field_name = force_text(field_name, encoding, errors='replace')

            if item_type == FIELD:
                # This is a post field, we can just set it in the post
                if transfer_encoding == 'base64':
                    raw_data = field_stream.read()
                    try:
                        data = base64.b64decode(raw_data)
                    except _BASE64_DECODE_ERROR:
                        # Undecodable payload is kept verbatim.
                        data = raw_data
                else:
                    data = field_stream.read()
                self._post.appendlist(field_name,
                                      force_text(data, encoding, errors='replace'))
            elif item_type == FILE:
                # This is a file, use the handler...
                file_name = disposition.get('filename')
                if not file_name:
                    continue
                file_name = force_text(file_name, encoding, errors='replace')
                file_name = self.IE_sanitize(unescape_entities(file_name))

                content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                content_type = content_type.strip()
                charset = content_type_extra.get('charset')

                try:
                    content_length = int(meta_data.get('content-length')[0])
                except (IndexError, TypeError, ValueError):
                    content_length = None

                counters = [0] * len(handlers)
                try:
                    for handler in handlers:
                        try:
                            handler.new_file(field_name, file_name,
                                             content_type, content_length,
                                             charset, content_type_extra)
                        except StopFutureHandlers:
                            break

                    for chunk in field_stream:
                        if transfer_encoding == 'base64':
                            # We only special-case base64 transfer encoding
                            # We should always decode base64 chunks by multiple of 4,
                            # ignoring whitespace.
                            stripped_chunk = b"".join(chunk.split())

                            remaining = len(stripped_chunk) % 4
                            while remaining != 0:
                                over_chunk = field_stream.read(4 - remaining)
                                stripped_chunk += b"".join(over_chunk.split())
                                remaining = len(stripped_chunk) % 4

                            try:
                                chunk = base64.b64decode(stripped_chunk)
                            except Exception as e:
                                # Since this is only a chunk, any error is an unfixable error.
                                msg = "Could not decode base64 data: %r" % e
                                six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])

                        for i, handler in enumerate(handlers):
                            chunk_length = len(chunk)
                            chunk = handler.receive_data_chunk(chunk,
                                                               counters[i])
                            counters[i] += chunk_length
                            if chunk is None:
                                # If the chunk received by the handler is None, then don't continue.
                                break
                except SkipFile:
                    self._close_files()
                    # Just use up the rest of this file...
                    exhaust(field_stream)
                else:
                    # Handle file upload completions on next iteration.
                    old_field_name = field_name
            else:
                # If this is neither a FIELD or a FILE, just exhaust the stream.
                exhaust(stream)
    except StopUpload as e:
        self._close_files()
        if not e.connection_reset:
            exhaust(self._input_data)
    else:
        # Make sure that the request data is all fed
        exhaust(self._input_data)

    # Signal that the upload has completed.
    for handler in handlers:
        retval = handler.upload_complete()
        if retval:
            break

    return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
    """
    Handle all the signaling that takes place when a file is complete.

    ``counters`` holds the number of bytes fed to each handler; the first
    handler that returns a file object wins and the rest are skipped.
    """
    for i, handler in enumerate(self._upload_handlers):
        file_obj = handler.file_complete(counters[i])
        if file_obj:
            # If it returns a file object, then set the files dict.
            self._files.appendlist(
                force_text(old_field_name, self._encoding, errors='replace'),
                file_obj)
            break
def IE_sanitize(self, filename):
    """Strip the Windows directory path Internet Explorer prepends to uploads."""
    if not filename:
        # Preserve falsy inputs (None / empty string) unchanged.
        return filename
    return filename.rsplit("\\", 1)[-1].strip()
def _close_files(self):
    """Close any file objects the upload handlers are still holding open."""
    # Free up all file handles.
    # FIXME: this currently assumes that upload handlers store the file as 'file'
    # We should document that... (Maybe add handler.free_file to complement new_file)
    for handler in self._upload_handlers:
        if hasattr(handler, 'file'):
            handler.file.close()
class LazyStream(six.Iterator):
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.

    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a "look-back"
    variable in case you need to "unget" some bytes.
    """
    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.

        A producer is an iterable that returns a string each time it
        is called.
        """
        self._producer = producer
        self._empty = False
        # Bytes pushed back by unget(); served before asking the producer.
        self._leftover = b''
        self.length = length
        self.position = 0
        self._remaining = length
        # Recent unget() sizes, used to detect a stuck (looping) parser.
        self._unget_history = []

    def tell(self):
        # Current logical read offset (rewound by unget()).
        return self.position

    def read(self, size=None):
        """Read up to ``size`` bytes, or everything when ``size`` is None."""
        def parts():
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b''.join(self)
                return
            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, 'remaining bytes to read should never go negative'
                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    emitting = chunk[:remaining]
                    # Stash the surplus so the next read sees it first.
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting
        out = b''.join(parts())
        return out

    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.

        This procedure just returns whatever chunk is conveniently returned
        from the iterator instead. Useful to avoid unnecessary bookkeeping if
        performance is an issue.
        """
        if self._leftover:
            output = self._leftover
            self._leftover = b''
        else:
            output = next(self._producer)
            # A fresh producer chunk means we are making progress, so reset
            # the unget-loop detector.
            self._unget_history = []
        self.position += len(output)
        return output

    def close(self):
        """
        Used to invalidate/disable this lazy stream.

        Replaces the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []

    def __iter__(self):
        return self

    def unget(self, bytes):
        """
        Places bytes back onto the front of the lazy stream.

        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = b''.join([bytes, self._leftover])

    def _update_unget_history(self, num_bytes):
        """
        Updates the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're mostly likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        # Keep at most the 50 most recent unget sizes.
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len([current_number for current_number in self._unget_history
                            if current_number == num_bytes])

        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )
class ChunkIter(six.Iterator):
    """
    Yield successive reads of ``chunk_size`` bytes from a file-like object,
    stopping when the object returns no data or signals exhaustion.
    """
    def __init__(self, flo, chunk_size=64 * 1024):
        self.flo = flo
        self.chunk_size = chunk_size

    def __iter__(self):
        return self

    def __next__(self):
        try:
            data = self.flo.read(self.chunk_size)
        except InputStreamExhausted:
            raise StopIteration()
        if not data:
            # An empty read means end-of-stream.
            raise StopIteration()
        return data
class InterBoundaryIter(six.Iterator):
    """
    A Producer that will iterate over boundaries.

    Each call to next() yields a LazyStream covering one multipart part;
    iteration stops once the underlying stream is exhausted.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return LazyStream(BoundaryIter(self._stream, self._boundary))
        except InputStreamExhausted:
            raise StopIteration()
class BoundaryIter(six.Iterator):
    """
    A Producer that is sensitive to boundaries.

    Will happily yield bytes until a boundary is found. Will yield the bytes
    before the boundary, throw away the boundary bytes themselves, and push the
    post-boundary bytes back on the stream.

    The future calls to next() after locating the boundary will raise a
    StopIteration exception.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
        self._done = False
        # rollback an additional six bytes because the format is like
        # this: CRLF<boundary>[--CRLF]
        self._rollback = len(boundary) + 6

        # Try to use mx fast string search if available. Otherwise
        # use Python find. Wrap the latter for consistency.
        # Probe one byte so an empty stream raises InputStreamExhausted
        # immediately, then push the byte back.
        unused_char = self._stream.read(1)
        if not unused_char:
            raise InputStreamExhausted()
        self._stream.unget(unused_char)

    def __iter__(self):
        return self

    def __next__(self):
        if self._done:
            raise StopIteration()

        stream = self._stream
        rollback = self._rollback

        bytes_read = 0
        chunks = []
        # Read a little more than `rollback` bytes so a boundary straddling
        # a chunk edge can still be detected.
        for bytes in stream:
            bytes_read += len(bytes)
            chunks.append(bytes)
            if bytes_read > rollback:
                break
            if not bytes:
                break
        else:
            self._done = True

        if not chunks:
            raise StopIteration()

        chunk = b''.join(chunks)
        boundary = self._find_boundary(chunk, len(chunk) < self._rollback)

        if boundary:
            end, next = boundary
            # Everything after the boundary belongs to the next part.
            stream.unget(chunk[next:])
            self._done = True
            return chunk[:end]
        else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
                # There's nothing left, we should just return and mark as done.
                self._done = True
                return chunk
            else:
                stream.unget(chunk[-rollback:])
                return chunk[:-rollback]

    def _find_boundary(self, data, eof=False):
        """
        Finds a multipart boundary in data.

        Should no boundary exist in the data None is returned instead. Otherwise
        a tuple containing the indices of the following are returned:

         * the end of current encapsulation
         * the start of the next encapsulation
        """
        index = data.find(self._boundary)
        if index < 0:
            return None
        else:
            end = index
            next = index + len(self._boundary)
            # backup over CRLF
            last = max(0, end - 1)
            if data[last:last + 1] == b'\n':
                end -= 1
                last = max(0, end - 1)
            if data[last:last + 1] == b'\r':
                end -= 1
            return end, next
def exhaust(stream_or_iterable):
    """
    Completely exhausts an iterator or stream.

    Raise a MultiPartParserError if the argument is not a stream or an iterable.
    """
    try:
        iterator = iter(stream_or_iterable)
    except TypeError:
        # Not directly iterable -- treat it as a raw stream and drain it
        # in 16kB reads.
        iterator = ChunkIter(stream_or_iterable, 16384)

    if iterator is None:
        raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')

    for _ in iterator:
        pass
def parse_boundary_stream(stream, max_header_size):
    """
    Parses one and exactly one stream that encapsulates a boundary.

    Returns a ``(item_type, headers_dict, stream)`` triple where item_type is
    RAW, FIELD or FILE depending on the content-disposition header found.
    """
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)

    # 'find' returns the top of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b'\r\n\r\n')

    def _parse_header(line):
        # Split one raw header line into (name, (value, params)).
        main_value_pair, params = parse_header(line)
        try:
            name, value = main_value_pair.split(':', 1)
        except ValueError:
            raise ValueError("Invalid header: %r" % line)
        return name, (value, params)

    if header_end == -1:
        # we find no header, so we just mark this fact and pass on
        # the stream verbatim
        stream.unget(chunk)
        return (RAW, {}, stream)

    header = chunk[:header_end]

    # here we place any excess chunk back onto the stream, as
    # well as throwing away the CRLFCRLF bytes from above.
    stream.unget(chunk[header_end + 4:])

    TYPE = RAW
    outdict = {}

    # Eliminate blank lines
    for line in header.split(b'\r\n'):
        # This terminology ("main value" and "dictionary of
        # parameters") is from the Python docs.
        try:
            name, (value, params) = _parse_header(line)
        except ValueError:
            continue

        if name == 'content-disposition':
            TYPE = FIELD
            if params.get('filename'):
                TYPE = FILE

        outdict[name] = value, params

    if TYPE == RAW:
        # No content-disposition was found: push the header bytes back so
        # they are treated as payload.
        stream.unget(chunk)

    return (TYPE, outdict, stream)
class Parser(object):
    """Iterate over the parts of a multipart stream split on *boundary*."""
    def __init__(self, stream, boundary):
        self._stream = stream
        # In the body, boundaries appear prefixed with two dashes.
        self._separator = b'--' + boundary

    def __iter__(self):
        boundarystream = InterBoundaryIter(self._stream, self._separator)
        for sub_stream in boundarystream:
            # Iterate over each part
            yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
    """
    Parse the header into a key-value.

    Input (line): bytes, output: unicode for key/name, bytes for value which
    will be decoded later.
    """
    plist = _parse_header_params(b';' + line)
    key = plist.pop(0).lower().decode('ascii')
    pdict = {}
    for p in plist:
        i = p.find(b'=')
        if i >= 0:
            has_encoding = False
            name = p[:i].strip().lower().decode('ascii')
            if name.endswith('*'):
                # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
                # http://tools.ietf.org/html/rfc2231#section-4
                name = name[:-1]
                if p.count(b"'") == 2:
                    has_encoding = True
            value = p[i + 1:].strip()
            if has_encoding:
                # Value is "<charset>'<lang>'<percent-encoded data>": split
                # and percent-decode using the declared charset.
                encoding, lang, value = value.split(b"'")
                if six.PY3:
                    value = unquote(value.decode(), encoding=encoding.decode())
                else:
                    value = unquote(value).decode(encoding)
            if len(value) >= 2 and value[:1] == value[-1:] == b'"':
                # Strip surrounding quotes and unescape backslash sequences.
                value = value[1:-1]
                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
            pdict[name] = value
    return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
|
holmes/intellij-community | refs/heads/master | python/testData/inspections/PyClassHasNoInitInspection/parentClass.py | 83 | __author__ = 'ktisha'
class <weak_warning descr="Class has no __init__ method">A</weak_warning>:
def foo(self):
self.b = 1
class B(A):
def __init__(self):
self.b = 2
|
LingxiaoJIA/gem5 | refs/heads/master | tests/configs/simple-timing.py | 69 | # Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.objects import *
from base_config import *
# Build a single-CPU syscall-emulation system using the timing-mode simple
# CPU model and expose it as the simulation root object.
root = BaseSESystemUniprocessor(mem_mode='timing',
                                cpu_class=TimingSimpleCPU).create_root()
|
sYnfo/samba | refs/heads/master | buildtools/wafsamba/samba_bundled.py | 19 | # functions to support bundled libraries
from Configure import conf
import sys, Logs
from samba_utils import *
def PRIVATE_NAME(bld, name, private_extension, private_library):
    """Return the (possibly suffixed) name used for a bundled private library.

    Public libraries keep their name; so do libraries that already carry the
    configured private extension or are flagged with a private extension of
    their own.
    """
    # we now use the same private name for libraries as the public name.
    # see http://git.samba.org/?p=tridge/junkcode.git;a=tree;f=shlib for a
    # demonstration that this is the right thing to do
    # also see http://lists.samba.org/archive/samba-technical/2011-January/075816.html
    if not private_library or private_extension:
        return name

    ext = bld.env.PRIVATE_EXTENSION
    if ext and (name.startswith('%s' % ext) or name.endswith('%s' % ext)):
        # The name already mentions the extension; leave it alone.
        return name

    return "%s-%s" % (name, ext)
def target_in_list(target, lst, default):
    """Evaluate *target* against an ordered rule list.

    Rules are checked per entry, in order: an exact match enables, a
    '!'-prefixed match disables, 'ALL' enables and 'NONE' disables.  The
    first matching entry wins; *default* applies when nothing matches.
    """
    rules = ((target, True), ('!' + target, False), ('ALL', True), ('NONE', False))
    for entry in lst:
        for pattern, verdict in rules:
            if entry == pattern:
                return verdict
    return default
def BUILTIN_LIBRARY(bld, name):
    '''return True if a library should be builtin
    instead of being built as a shared lib'''
    # Consults the BUILTIN_LIBRARIES rule list ('!name', 'ALL' and 'NONE'
    # entries are honoured, first match wins).
    return target_in_list(name, bld.env.BUILTIN_LIBRARIES, False)
Build.BuildContext.BUILTIN_LIBRARY = BUILTIN_LIBRARY
def BUILTIN_DEFAULT(opt, builtins):
    '''set a comma separated default list of builtin libraries for this package'''
    # First caller wins; later calls leave the existing default untouched.
    if 'BUILTIN_LIBRARIES_DEFAULT' in Options.options:
        return
    Options.options['BUILTIN_LIBRARIES_DEFAULT'] = builtins
Options.Handler.BUILTIN_DEFAULT = BUILTIN_DEFAULT
def PRIVATE_EXTENSION_DEFAULT(opt, extension, noextension=''):
    '''set a default private library extension'''
    # First caller wins; later calls leave the existing defaults untouched.
    if 'PRIVATE_EXTENSION_DEFAULT' in Options.options:
        return
    Options.options['PRIVATE_EXTENSION_DEFAULT'] = extension
    Options.options['PRIVATE_EXTENSION_EXCEPTION'] = noextension
Options.Handler.PRIVATE_EXTENSION_DEFAULT = PRIVATE_EXTENSION_DEFAULT
def minimum_library_version(conf, libname, default):
    '''allow override of minimum system library version'''
    # --minimum-library-version takes a comma separated list of
    # "lib:version" pairs, e.g. "talloc:2.0.1,tdb:1.2.9".
    minlist = Options.options.MINIMUM_LIBRARY_VERSION
    if not minlist:
        return default

    for m in minlist.split(','):
        a = m.split(':')
        if len(a) != 2:
            # Malformed override is a hard configure error.
            Logs.error("Bad syntax for --minimum-library-version of %s" % m)
            sys.exit(1)
        if a[0] == libname:
            return a[1]

    return default
@conf
def LIB_MAY_BE_BUNDLED(conf, libname):
    '''True unless the configuration explicitly forbids bundling *libname*.'''
    bundled = conf.env.BUNDLED_LIBS
    if libname in bundled:
        return True
    if '!%s' % libname in bundled:
        return False
    # A global 'NONE' entry disables bundling for everything not listed.
    return 'NONE' not in bundled
@conf
def LIB_MUST_BE_BUNDLED(conf, libname):
    '''True when the configuration forces *libname* to be bundled.'''
    bundled = conf.env.BUNDLED_LIBS
    if libname in bundled:
        return True
    if '!%s' % libname in bundled:
        return False
    # A global 'ALL' entry forces bundling for everything not excluded.
    return 'ALL' in bundled
@conf
def LIB_MUST_BE_PRIVATE(conf, libname):
    '''return True if libname must be installed with the private library extension'''
    return ('ALL' in conf.env.PRIVATE_LIBS or
            libname in conf.env.PRIVATE_LIBS)
@conf
def CHECK_PREREQUISITES(conf, prereqs):
    '''return the subset of *prereqs* not yet found as system libraries'''
    # A prerequisite counts as satisfied once a FOUND_SYSTEMLIB_<name>
    # marker has been recorded in the configure environment.
    return [syslib for syslib in TO_LIST(prereqs)
            if 'FOUND_SYSTEMLIB_%s' % syslib not in conf.env]
@runonce
@conf
def CHECK_BUNDLED_SYSTEM_PKG(conf, libname, minversion='0.0.0',
                             onlyif=None, implied_deps=None, pkg=None):
    '''check if a library is available as a system library.

    This only tries using pkg-config
    '''
    # Thin wrapper: by omitting checkfunctions/headers only the pkg-config
    # path of CHECK_BUNDLED_SYSTEM is exercised.
    return conf.CHECK_BUNDLED_SYSTEM(libname,
                                     minversion=minversion,
                                     onlyif=onlyif,
                                     implied_deps=implied_deps,
                                     pkg=pkg)
@runonce
@conf
def CHECK_BUNDLED_SYSTEM(conf, libname, minversion='0.0.0',
                         checkfunctions=None, headers=None, checkcode=None,
                         onlyif=None, implied_deps=None,
                         require_headers=True, pkg=None, set_target=True):
    '''check if a library is available as a system library.

    this first tries via pkg-config, then if that fails
    tries by testing for a specified function in the specified lib
    '''
    if conf.LIB_MUST_BE_BUNDLED(libname):
        return False
    found = 'FOUND_SYSTEMLIB_%s' % libname
    if found in conf.env:
        # Result cached by an earlier check of the same library.
        return conf.env[found]

    def check_functions_headers_code():
        '''helper function for CHECK_BUNDLED_SYSTEM'''
        if require_headers and headers and not conf.CHECK_HEADERS(headers, lib=libname):
            return False
        if checkfunctions is not None:
            ok = conf.CHECK_FUNCS_IN(checkfunctions, libname, headers=headers,
                                     empty_decl=False, set_target=False)
            if not ok:
                return False
        if checkcode is not None:
            define='CHECK_BUNDLED_SYSTEM_%s' % libname.upper()
            ok = conf.CHECK_CODE(checkcode, lib=libname,
                                 headers=headers, local_include=False,
                                 msg=msg, define=define)
            conf.CONFIG_RESET(define)
            if not ok:
                return False
        return True

    # see if the library should only use a system version if another dependent
    # system version is found. That prevents possible use of mixed library
    # versions
    if onlyif:
        missing = conf.CHECK_PREREQUISITES(onlyif)
        if missing:
            if not conf.LIB_MAY_BE_BUNDLED(libname):
                Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing))
                sys.exit(1)
            conf.env[found] = False
            return False

    minversion = minimum_library_version(conf, libname, minversion)

    msg = 'Checking for system %s' % libname
    if minversion != '0.0.0':
        msg += ' >= %s' % minversion

    uselib_store=libname.upper()
    if pkg is None:
        pkg = libname

    # try pkgconfig first
    if (conf.CHECK_CFG(package=pkg,
                       args='"%s >= %s" --cflags --libs' % (pkg, minversion),
                       msg=msg, uselib_store=uselib_store) and
        check_functions_headers_code()):
        if set_target:
            conf.SET_TARGET_TYPE(libname, 'SYSLIB')
        conf.env[found] = True
        if implied_deps:
            conf.SET_SYSLIB_DEPS(libname, implied_deps)
        return True
    if checkfunctions is not None:
        # pkg-config failed: fall back to probing headers/functions directly.
        if check_functions_headers_code():
            conf.env[found] = True
            if implied_deps:
                conf.SET_SYSLIB_DEPS(libname, implied_deps)
            if set_target:
                conf.SET_TARGET_TYPE(libname, 'SYSLIB')
            return True
    conf.env[found] = False
    if not conf.LIB_MAY_BE_BUNDLED(libname):
        # Bundling disabled and no usable system copy: hard configure error.
        Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion))
        sys.exit(1)
    return False
def tuplize_version(version):
    """Convert a dotted version string such as "1.2.3" into an int tuple."""
    return tuple(int(part) for part in version.split("."))
@runonce
@conf
def CHECK_BUNDLED_SYSTEM_PYTHON(conf, libname, modulename, minversion='0.0.0'):
    '''check if a python module is available on the system and
    has the specified minimum version.
    '''
    if conf.LIB_MUST_BE_BUNDLED(libname):
        return False

    # see if the library should only use a system version if another dependent
    # system version is found. That prevents possible use of mixed library
    # versions
    minversion = minimum_library_version(conf, libname, minversion)

    try:
        m = __import__(modulename)
    except ImportError:
        found = False
    else:
        try:
            version = m.__version__
        except AttributeError:
            # Module importable but exposes no version; treat as not found.
            found = False
        else:
            found = tuplize_version(version) >= tuplize_version(minversion)
    if not found and not conf.LIB_MAY_BE_BUNDLED(libname):
        Logs.error('ERROR: Python module %s of version %s not found, and bundling disabled' % (libname, minversion))
        sys.exit(1)
    return found
def NONSHARED_BINARY(bld, name):
    '''return True if a binary should be built without non-system shared libs'''
    # Consults the NONSHARED_BINARIES rule list ('!name', 'ALL', 'NONE').
    return target_in_list(name, bld.env.NONSHARED_BINARIES, False)
Build.BuildContext.NONSHARED_BINARY = NONSHARED_BINARY
|
ashwin01/hasjob | refs/heads/master | shell.py | 3 | #!/usr/bin/env python
import os
# readline is imported for its side effect: line editing/history in the
# interactive session this script drops into.
import readline
from pprint import pprint
from flask import *
from hasjob import *

# Ask CPython to drop into an interactive prompt when the script ends, with
# the Flask and hasjob names above already in scope (a poor man's app shell).
os.environ['PYTHONINSPECT'] = 'True'
|
tokyo-jesus/greyskull | refs/heads/master | greyskull/handlers/tubes.py | 2 | # -*- coding: utf-8 -*-
from tornado.web import RequestHandler
class Index(RequestHandler):
    """Placeholder handler for the index route; GET is not implemented yet."""
    def get(self):
        # TODO: render the landing page.
        pass
class Redirect(RequestHandler):
    """Placeholder handler for the redirect route; GET is not implemented yet."""
    def get(self):
        # TODO: resolve the short link and issue the redirect.
        pass
|
gregdek/ansible | refs/heads/devel | test/units/module_utils/aws/test_aws_module.py | 68 | # -*- coding: utf-8 -*-
# (c) 2017, Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
from pytest import importorskip
import unittest
from ansible.module_utils import basic
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils._text import to_bytes
from units.compat.mock import Mock, patch
import json
importorskip("boto3")
botocore = importorskip("botocore")
class AWSModuleTestCase(unittest.TestCase):
    """Tests for AnsibleAWSModule construction and default argument injection."""

    # Minimal fake stdin payload so AnsibleModule's argument parsing succeeds
    # without a real Ansible invocation.
    basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {'_ansible_tmpdir': '/tmp/ansible-abc'}}))

    def test_create_aws_module_should_set_up_params(self):
        """Default args (e.g. 'region') are added unless default_args=False."""
        m = AnsibleAWSModule(argument_spec=dict(
            win_string_arg=dict(type='list', default=['win'])
        ))
        m_noretry_no_customargs = AnsibleAWSModule(
            auto_retry=False, default_args=False,
            argument_spec=dict(
                success_string_arg=dict(type='list', default=['success'])
            )
        )
        assert m, "module wasn't true!!"
        assert m_noretry_no_customargs, "module wasn't true!!"

        m_params = m.params
        m_no_defs_params = m_noretry_no_customargs.params
        # 'region' comes from the injected common AWS argument spec.
        assert 'region' in m_params
        assert 'win' in m_params["win_string_arg"]
        assert 'success' in m_no_defs_params["success_string_arg"]
        # With default_args=False no AWS credential args should be present.
        assert 'aws_secret_key' not in m_no_defs_params
class ErrorReportingTestcase(unittest.TestCase):
    """Tests that fail_json_aws renders botocore ClientErrors usefully."""

    def test_botocore_exception_reports_nicely_via_fail_json_aws(self):
        basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {'_ansible_tmpdir': '/tmp/ansible-abc'}}))
        module = AnsibleAWSModule(argument_spec=dict(
            fail_mode=dict(type='list', default=['success'])
        ))
        # Replace fail_json with a Mock so we can inspect how it was called.
        fail_json_double = Mock()
        err_msg = {'Error': {'Code': 'FakeClass.FakeError'}}
        with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
            try:
                raise botocore.exceptions.ClientError(err_msg, 'Could not find you')
            except Exception as e:
                print("exception is " + str(e))
                module.fail_json_aws(e, msg="Fake failure for testing boto exception messages")

        assert(len(fail_json_double.mock_calls) >
               0), "failed to call fail_json when should have"
        assert(len(fail_json_double.mock_calls) <
               2), "called fail_json multiple times when once would do"
        # The reported traceback should point back at this test function.
        assert("test_botocore_exception_reports_nicely"
               in fail_json_double.mock_calls[0][2]["exception"]), \
            "exception traceback doesn't include correct function, fail call was actually: " \
            + str(fail_json_double.mock_calls[0])
        assert("Fake failure for testing boto exception messages:"
               in fail_json_double.mock_calls[0][2]["msg"]), \
            "error message doesn't include the local message; was: " \
            + str(fail_json_double.mock_calls[0])
        assert("Could not find you" in fail_json_double.mock_calls[0][2]["msg"]), \
            "error message doesn't include the botocore exception message; was: " \
            + str(fail_json_double.mock_calls[0])
        try:
            fail_json_double.mock_calls[0][2]["error"]
        except KeyError:
            raise Exception("error was missing; call was: " + str(fail_json_double.mock_calls[0]))
        assert("FakeClass.FakeError" == fail_json_double.mock_calls[0][2]["error"]["code"]), \
            "Failed to find error/code; was: " + str(fail_json_double.mock_calls[0])

    def test_botocore_exception_without_response_reports_nicely_via_fail_json_aws(self):
        basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {'_ansible_tmpdir': '/tmp/ansible-abc'}}))
        module = AnsibleAWSModule(argument_spec=dict(
            fail_mode=dict(type='list', default=['success'])
        ))
        fail_json_double = Mock()
        # A ClientError constructed with a None response body.
        err_msg = None
        with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
            try:
                raise botocore.exceptions.ClientError(err_msg, 'Could not find you')
            except Exception as e:
                print("exception is " + str(e))
                module.fail_json_aws(e, msg="Fake failure for testing boto exception messages")

        assert(len(fail_json_double.mock_calls) > 0), "failed to call fail_json when should have"
        assert(len(fail_json_double.mock_calls) < 2), "called fail_json multiple times"
        assert("test_botocore_exception_without_response_reports_nicely_via_fail_json_aws"
               in fail_json_double.mock_calls[0][2]["exception"]), \
            "exception traceback doesn't include correct function, fail call was actually: " \
            + str(fail_json_double.mock_calls[0])
        assert("Fake failure for testing boto exception messages"
               in fail_json_double.mock_calls[0][2]["msg"]), \
            "error message doesn't include the local message; was: " \
            + str(fail_json_double.mock_calls[0])

        # I would have thought this should work, however the botocore exception comes back with
        # "argument of type 'NoneType' is not iterable" so it's probably not really designed
        # to handle "None" as an error response.
        #
        # assert("Could not find you" in fail_json_double.mock_calls[0][2]["msg"]), \
        #     "error message doesn't include the botocore exception message; was: " \
        #     + str(fail_json_double.mock_calls[0])

# TODO:
#  - an exception without a message
#  - plain boto exception
#  - socket errors and other standard things.
|
whereismyjetpack/ansible | refs/heads/devel | lib/ansible/playbook/attribute.py | 46 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from copy import deepcopy
class Attribute:

    def __init__(self, isa=None, private=False, default=None, required=False, listof=None, priority=0, class_type=None, always_post_validate=False, inherit=True):

        """
        :class:`Attribute` specifies constraints for attributes of objects which
        derive from playbook data.  The attributes of the object are basically
        a schema for the yaml playbook.

        :kwarg isa: The type of the attribute.  Allowable values are a string
            representation of any yaml basic datatype, python class, or percent.
            (Enforced at post-validation time).
        :kwarg private: (not used)
        :kwarg default: Default value if unspecified in the YAML document.
        :kwarg required: Whether or not the YAML document must contain this field.
            If the attribute is None when post-validated, an error will be
            raised.
        :kwarg listof: If isa is set to "list", this can optionally be set to
            ensure that all elements in the list are of the given type. Valid
            values here are the same as those for isa.
        :kwarg priority: The order in which the fields should be parsed. Generally
            this does not need to be set, it is for rare situations where another
            field depends on the fact that another field was parsed first.
        :kwarg class_type: If isa is set to "class", this can be optionally set to
            a class (not a string name).  The YAML data for this field will be
            passed to the __init__ method of that class during post validation and
            the field will be an instance of that class.
        :kwarg always_post_validate: Controls whether a field should be post
            validated or not (default: False).
        :kwarg inherit: A boolean value, which controls whether the object
            containing this field should attempt to inherit the value from its
            parent object if the local value is None.
        """
        self.isa = isa
        self.private = private
        self.required = required
        self.listof = listof
        self.priority = priority
        self.class_type = class_type
        self.always_post_validate = always_post_validate
        self.inherit = inherit

        # Deep-copy mutable container defaults so that all objects built from
        # this schema do not end up sharing one list/dict/set instance (the
        # classic mutable-default pitfall).  The previous code assigned
        # `self.default = default` unconditionally first, which was redundant
        # as it was immediately overwritten here.
        if default is not None and self.isa in ('list', 'dict', 'set'):
            self.default = deepcopy(default)
        else:
            self.default = default

    # Attributes compare and sort strictly by `priority`.
    def __eq__(self, other):
        return other.priority == self.priority

    def __ne__(self, other):
        return other.priority != self.priority

    # NB: higher priority numbers sort first (comparisons are intentionally
    # reversed relative to the numeric priority values)

    def __lt__(self, other):
        return other.priority < self.priority

    def __gt__(self, other):
        return other.priority > self.priority

    def __le__(self, other):
        return other.priority <= self.priority

    def __ge__(self, other):
        return other.priority >= self.priority
class FieldAttribute(Attribute):
    # Marker subclass: declaring a class attribute as FieldAttribute (rather
    # than plain Attribute) presumably lets the playbook base classes pick it
    # up as a data field -- TODO confirm against the consumer of this class.
    pass
|
cerebis/i3seqdb | refs/heads/master | load_dna_concentrations.py | 1 | #!/usr/bin/env python
import argparse
import pandas
import pymongo
if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Load DNA concentrations from a PicoGreen plate assay')
    parser.add_argument('table', help='Excel spreadsheet from plate reader')
    # argparse stores positionals under their literal name, so a positional
    # called 'plate-id' would NOT be reachable as args.plate_id.  Use an
    # underscore dest and keep the dashed form only in the usage text.
    parser.add_argument('plate_id', metavar='plate-id', help='ID of the plate')
    args = parser.parse_args()

    # skiprows/skip_footer trim the plate reader's header and footer rows
    table = pandas.read_excel(args.table, encoding='ascii', skiprows=24, index_col=0, skip_footer=1)
    # .values replaces the deprecated DataFrame.as_matrix()
    concs = table.values

    # MongoClient lives in the pymongo namespace; the bare name was a NameError
    client = pymongo.MongoClient('mongodb://localhost:27017/')
    db = client.robolims_database

    plate = db.platemap.find_one({"plate_id": args.plate_id})

    for row in range(concs.shape[0]):
        for col in range(concs.shape[1]):
            sample_id = plate['wells'][row][col]
            # find_one() returns a plain dict, so attribute assignment on it
            # was a no-op-at-best; persist the concentration with a proper
            # filter + $set update instead of the malformed update_one(sample)
            db.samples.update_one(
                {"sample_id": sample_id},
                {"$set": {"concentration": concs[row, col]}})
|
QianBIG/odoo | refs/heads/8.0 | openerp/http.py | 66 | # -*- coding: utf-8 -*-
#----------------------------------------------------------
# OpenERP HTTP layer
#----------------------------------------------------------
import ast
import collections
import contextlib
import datetime
import errno
import functools
import getpass
import inspect
import logging
import mimetypes
import os
import pprint
import random
import re
import sys
import tempfile
import threading
import time
import traceback
import urlparse
import warnings
from zlib import adler32
import babel.core
import psycopg2
import simplejson
import werkzeug.contrib.sessions
import werkzeug.datastructures
import werkzeug.exceptions
import werkzeug.local
import werkzeug.routing
import werkzeug.wrappers
import werkzeug.wsgi
from werkzeug.wsgi import wrap_file
try:
import psutil
except ImportError:
psutil = None
import openerp
from openerp import SUPERUSER_ID
from openerp.service.server import memory_info
from openerp.service import security, model as service_model
from openerp.tools.func import lazy_property
from openerp.tools import ustr
_logger = logging.getLogger(__name__)
rpc_request = logging.getLogger(__name__ + '.rpc.request')
rpc_response = logging.getLogger(__name__ + '.rpc.response')
# 1 week cache for statics as advised by Google Page Speed
STATIC_CACHE = 60 * 60 * 24 * 7

#----------------------------------------------------------
# RequestHandler
#----------------------------------------------------------
# Thread local global request object
# (pushed/popped by WebRequest.__enter__/__exit__ below)
_request_stack = werkzeug.local.LocalStack()

request = _request_stack()
"""
A global proxy that always redirect to the current request object.
"""
def replace_request_password(args):
    """Return *args* as a tuple with the password slot masked.

    The RPC protocol always carries the password as the third positional
    argument, so it is replaced by ``'*'`` before the arguments are logged --
    this keeps forwarded logs safe to share for diagnostics/debugging.
    """
    masked = list(args)
    if len(masked) > 2:
        masked[2] = '*'
    return tuple(masked)
# don't trigger debugger for those exceptions, they carry user-facing warnings
# and indications, they're not necessarily indicative of anything being
# *broken*
# (checked by dispatch_rpc and WebRequest._handle_exception below)
NO_POSTMORTEM = (openerp.osv.orm.except_orm,
                 openerp.exceptions.AccessError,
                 openerp.exceptions.AccessDenied,
                 openerp.exceptions.Warning,
                 openerp.exceptions.RedirectWarning)
def dispatch_rpc(service_name, method, params):
    """ Handle a RPC call.

    This is pure Python code, the actual marshalling (from/to XML-RPC) is done
    in a upper layer.

    :param str service_name: one of ``common``, ``db``, ``object``, ``report``
        or any key registered in ``openerp.service.wsgi_server.rpc_handlers``
    :param str method: method name dispatched to the selected service
    :param params: positional arguments forwarded to the service dispatcher
    :returns: whatever the service dispatcher returns
    """
    try:
        # optional request/response debug logging, with memory accounting
        # when psutil is available
        rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
        rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
        if rpc_request_flag or rpc_response_flag:
            start_time = time.time()
            start_rss, start_vms = 0, 0
            if psutil:
                start_rss, start_vms = memory_info(psutil.Process(os.getpid()))
            # NOTE(review): `rpc_request` is a logger object and always truthy,
            # so this condition reduces to `rpc_response_flag`; it looks like
            # `rpc_request_flag` was intended -- verify before changing.
            if rpc_request and rpc_response_flag:
                openerp.netsvc.log(rpc_request, logging.DEBUG, '%s.%s' % (service_name, method), replace_request_password(params))

        # reset the per-thread db/uid trackers before dispatching
        threading.current_thread().uid = None
        threading.current_thread().dbname = None

        # route the call to the matching service dispatcher
        if service_name == 'common':
            dispatch = openerp.service.common.dispatch
        elif service_name == 'db':
            dispatch = openerp.service.db.dispatch
        elif service_name == 'object':
            dispatch = openerp.service.model.dispatch
        elif service_name == 'report':
            dispatch = openerp.service.report.dispatch
        else:
            dispatch = openerp.service.wsgi_server.rpc_handlers.get(service_name)
        result = dispatch(method, params)

        if rpc_request_flag or rpc_response_flag:
            end_time = time.time()
            end_rss, end_vms = 0, 0
            if psutil:
                end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
            logline = '%s.%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % (service_name, method, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
            if rpc_response_flag:
                openerp.netsvc.log(rpc_response, logging.DEBUG, logline, result)
            else:
                openerp.netsvc.log(rpc_request, logging.DEBUG, logline, replace_request_password(params), depth=1)

        return result
    except NO_POSTMORTEM:
        # user-facing warnings: re-raise without invoking the debugger
        raise
    except openerp.exceptions.DeferredException, e:
        # DeferredException carries its own captured traceback
        _logger.exception(openerp.tools.exception_to_unicode(e))
        openerp.tools.debugger.post_mortem(openerp.tools.config, e.traceback)
        raise
    except Exception, e:
        _logger.exception(openerp.tools.exception_to_unicode(e))
        openerp.tools.debugger.post_mortem(openerp.tools.config, sys.exc_info())
        raise
def local_redirect(path, query=None, keep_hash=False, forward_debug=True, code=303):
    """Build a same-host redirect response to *path*.

    :param query: optional mapping encoded as the query string (note: it is
        mutated in place when the debug flag is forwarded)
    :param keep_hash: when True, preserve ``location.hash`` across the
        redirect via :func:`redirect_with_hash`
    :param forward_debug: propagate the current request's debug flag so the
        user stays in debug mode across internal redirects
    :param code: HTTP status code of the redirect
    """
    query = query or {}
    if forward_debug and request and request.debug:
        query['debug'] = None
    url = path + '?' + werkzeug.url_encode(query) if query else path
    if keep_hash:
        return redirect_with_hash(url, code)
    return werkzeug.utils.redirect(url, code)
def redirect_with_hash(url, code=303):
    """Redirect to *url* while preserving the URL fragment (``location.hash``).

    Most IE and Safari versions drop the fragment on an HTTP redirect, and
    even IE10 (which claims support) fails on multi-step redirect chains.
    See the extensive test page at http://greenbytes.de/tech/tc/httpredirects/
    """
    browser = request.httprequest.user_agent.browser
    if browser in ('firefox',):
        # Firefox preserves the fragment, a plain HTTP redirect is fine
        return werkzeug.utils.redirect(url, code)
    # everyone else gets a tiny page that re-appends the fragment in JS
    return "<html><head><script>window.location = '%s' + location.hash;</script></head></html>" % url
class WebRequest(object):
    """ Parent class for all Odoo Web request types, mostly deals with
    initialization and setup of the request object (the dispatching itself has
    to be handled by the subclasses)

    :param httprequest: a wrapped werkzeug Request object
    :type httprequest: :class:`werkzeug.wrappers.BaseRequest`

    .. attribute:: httprequest

        the original :class:`werkzeug.wrappers.Request` object provided to the
        request

    .. attribute:: params

        :class:`~collections.Mapping` of request parameters, not generally
        useful as they're provided directly to the handler method as keyword
        arguments
    """
    def __init__(self, httprequest):
        self.httprequest = httprequest
        self.httpresponse = None
        # NOTE(review): this instance attribute shadows the `httpsession`
        # lazy_property declared at the bottom of the class; both resolve to
        # the werkzeug session
        self.httpsession = httprequest.session
        self.disable_db = False
        self.uid = None
        self.endpoint = None
        self.auth_method = None
        self._cr = None

        # prevents transaction commit, use when you catch an exception during handling
        self._failed = None

        # set db/uid trackers - they're cleaned up at the WSGI
        # dispatching phase in openerp.service.wsgi_server.application
        if self.db:
            threading.current_thread().dbname = self.db
        if self.session.uid:
            threading.current_thread().uid = self.session.uid

    @lazy_property
    def env(self):
        """
        The :class:`~openerp.api.Environment` bound to current request.
        Raises a :class:`RuntimeError` if the current requests is not bound
        to a database.
        """
        if not self.db:
            # fixed: used to `return` the exception instead of raising it,
            # handing callers a RuntimeError instance as the "environment"
            raise RuntimeError('request not bound to a database')
        return openerp.api.Environment(self.cr, self.uid, self.context)

    @lazy_property
    def context(self):
        """
        :class:`~collections.Mapping` of context values for the current
        request
        """
        return dict(self.session.context)

    @lazy_property
    def lang(self):
        self.session._fix_lang(self.context)
        return self.context["lang"]

    @lazy_property
    def session(self):
        """
        a :class:`OpenERPSession` holding the HTTP session data for the
        current http session
        """
        return self.httprequest.session

    @property
    def cr(self):
        """
        :class:`~openerp.sql_db.Cursor` initialized for the current method
        call.

        Accessing the cursor when the current request uses the ``none``
        authentication will raise an exception.
        """
        # can not be a lazy_property because manual rollback in _call_function
        # if already set (?)
        if not self.db:
            # fixed: used to `return` the exception instead of raising it
            raise RuntimeError('request not bound to a database')
        if not self._cr:
            self._cr = self.registry.cursor()
        return self._cr

    def __enter__(self):
        _request_stack.push(self)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _request_stack.pop()

        if self._cr:
            # only commit when the handler finished cleanly and no exception
            # was recorded via _handle_exception
            if exc_type is None and not self._failed:
                self._cr.commit()
            self._cr.close()
        # just to be sure no one tries to re-use the request
        self.disable_db = True
        self.uid = None

    def set_handler(self, endpoint, arguments, auth):
        # is this needed ?
        arguments = dict((k, v) for k, v in arguments.iteritems()
                         if not k.startswith("_ignored_"))

        endpoint.arguments = arguments
        self.endpoint = endpoint
        self.auth_method = auth

    def _handle_exception(self, exception):
        """Called within an except block to allow converting exceptions
           to abitrary responses. Anything returned (except None) will
           be used as response."""
        self._failed = exception  # prevent tx commit
        if not isinstance(exception, NO_POSTMORTEM) \
                and not isinstance(exception, werkzeug.exceptions.HTTPException):
            openerp.tools.debugger.post_mortem(
                openerp.tools.config, sys.exc_info())
        raise

    def _call_function(self, *args, **kwargs):
        request = self
        if self.endpoint.routing['type'] != self._request_type:
            msg = "%s, %s: Function declared as capable of handling request of type '%s' but called with a request of type '%s'"
            params = (self.endpoint.original, self.httprequest.path, self.endpoint.routing['type'], self._request_type)
            _logger.error(msg, *params)
            raise werkzeug.exceptions.BadRequest(msg % params)

        kwargs.update(self.endpoint.arguments)

        # Backward for 7.0
        if self.endpoint.first_arg_is_req:
            args = (request,) + args

        # Correct exception handling and concurency retry
        @service_model.check
        def checked_call(___dbname, *a, **kw):
            # The decorator can call us more than once if there is an database error. In this
            # case, the request cursor is unusable. Rollback transaction to create a new one.
            if self._cr:
                self._cr.rollback()
            return self.endpoint(*a, **kw)

        if self.db:
            return checked_call(self.db, *args, **kwargs)
        return self.endpoint(*args, **kwargs)

    @property
    def debug(self):
        """ Indicates whether the current request is in "debug" mode
        """
        return 'debug' in self.httprequest.args

    @contextlib.contextmanager
    def registry_cr(self):
        warnings.warn('please use request.registry and request.cr directly', DeprecationWarning)
        yield (self.registry, self.cr)

    @lazy_property
    def session_id(self):
        """
        opaque identifier for the :class:`OpenERPSession` instance of
        the current request

        .. deprecated:: 8.0

            Use the ``sid`` attribute on :attr:`.session`
        """
        return self.session.sid

    @property
    def registry(self):
        """
        The registry to the database linked to this request. Can be ``None``
        if the current request uses the ``none`` authentication.

        .. deprecated:: 8.0

            use :attr:`.env`
        """
        return openerp.modules.registry.RegistryManager.get(self.db) if self.db else None

    @property
    def db(self):
        """
        The database linked to this request. Can be ``None``
        if the current request uses the ``none`` authentication.
        """
        return self.session.db if not self.disable_db else None

    @lazy_property
    def httpsession(self):
        """ HTTP session data

        .. deprecated:: 8.0

            Use :attr:`.session` instead.
        """
        return self.session
def route(route=None, **kw):
    """
    Decorator marking the decorated method as being a handler for
    requests. The method must be part of a subclass of ``Controller``.

    :param route: string or array. The route part that will determine which
                  http requests will match the decorated method. Can be a
                  single string or an array of strings. See werkzeug's routing
                  documentation for the format of route expression (
                  http://werkzeug.pocoo.org/docs/routing/ ).
    :param type: The type of request, can be ``'http'`` or ``'json'``.
    :param auth: The type of authentication method, can on of the following:

                 * ``user``: The user must be authenticated and the current request
                   will perform using the rights of the user.
                 * ``admin``: The user may not be authenticated and the current request
                   will perform using the admin user.
                 * ``none``: The method is always active, even if there is no
                   database. Mainly used by the framework and authentication
                   modules. There request code will not have any facilities to access
                   the database nor have any configuration indicating the current
                   database nor the current user.
    :param methods: A sequence of http methods this route applies to. If not
                    specified, all methods are allowed.
    :param cors: The Access-Control-Allow-Origin cors directive value.
    """
    routing = dict(kw)
    # unspecified type defaults to 'http' downstream, so only validate when given
    assert routing.get('type', 'http') in ("http", "json")

    def decorator(f):
        if route:
            routing['routes'] = route if isinstance(route, list) else [route]

        @functools.wraps(f)
        def response_wrap(*args, **kw):
            result = f(*args, **kw)
            # json handlers and ready-made Response objects pass through as-is
            if isinstance(result, Response) or f.routing_type == 'json':
                return result

            if isinstance(result, basestring):
                return Response(result)

            if isinstance(result, werkzeug.exceptions.HTTPException):
                result = result.get_response(request.httprequest.environ)
            if isinstance(result, werkzeug.wrappers.BaseResponse):
                result = Response.force_type(result)
                result.set_default()
                return result

            _logger.warn("<function %s.%s> returns an invalid response type for an http request" % (f.__module__, f.__name__))
            return result

        response_wrap.routing = routing
        response_wrap.original_func = f
        return response_wrap
    return decorator
class JsonRequest(WebRequest):
    """ Request handler for `JSON-RPC 2
    <http://www.jsonrpc.org/specification>`_ over HTTP

    * ``method`` is ignored
    * ``params`` must be a JSON object (not an array) and is passed as keyword
      arguments to the handler method
    * the handler method's result is returned as JSON-RPC ``result`` and
      wrapped in the `JSON-RPC Response
      <http://www.jsonrpc.org/specification#response_object>`_

    Sucessful request::

      --> {"jsonrpc": "2.0",
           "method": "call",
           "params": {"context": {},
                      "arg1": "val1" },
           "id": null}

      <-- {"jsonrpc": "2.0",
           "result": { "res1": "val1" },
           "id": null}

    Request producing a error::

      --> {"jsonrpc": "2.0",
           "method": "call",
           "params": {"context": {},
                      "arg1": "val1" },
           "id": null}

      <-- {"jsonrpc": "2.0",
           "error": {"code": 1,
                     "message": "End user error message.",
                     "data": {"code": "codestring",
                              "debug": "traceback" } },
           "id": null}
    """
    _request_type = "json"
    def __init__(self, *args):
        super(JsonRequest, self).__init__(*args)
        self.jsonp_handler = None
        args = self.httprequest.args
        jsonp = args.get('jsonp')
        self.jsonp = jsonp
        request = None
        request_id = args.get('id')
        # jsonp requests come in three flavours; regular jsonrpc2 is the
        # fallback when none of them match
        if jsonp and self.httprequest.method == 'POST':
            # jsonp 2 steps step1 POST: save call
            def handler():
                # stash the payload in the session, to be replayed by the
                # matching GET (step 2) carrying the same request id
                self.session['jsonp_request_%s' % (request_id,)] = self.httprequest.form['r']
                self.session.modified = True
                headers=[('Content-Type', 'text/plain; charset=utf-8')]
                r = werkzeug.wrappers.Response(request_id, headers=headers)
                return r
            self.jsonp_handler = handler
            return
        elif jsonp and args.get('r'):
            # jsonp method GET
            request = args.get('r')
        elif jsonp and request_id:
            # jsonp 2 steps step2 GET: run and return result
            request = self.session.pop('jsonp_request_%s' % (request_id,), '{}')
        else:
            # regular jsonrpc2
            request = self.httprequest.stream.read()

        # Read POST content or POST Form Data named "request"
        try:
            self.jsonrequest = simplejson.loads(request)
        except simplejson.JSONDecodeError:
            msg = 'Invalid JSON data: %r' % (request,)
            _logger.error('%s: %s', self.httprequest.path, msg)
            raise werkzeug.exceptions.BadRequest(msg)

        self.params = dict(self.jsonrequest.get("params", {}))
        # context sent in params overrides the session context
        self.context = self.params.pop('context', dict(self.session.context))

    def _json_response(self, result=None, error=None):
        # build the JSON-RPC 2.0 response envelope (result XOR error)
        response = {
            'jsonrpc': '2.0',
            'id': self.jsonrequest.get('id')
            }
        if error is not None:
            response['error'] = error
        if result is not None:
            response['result'] = result

        if self.jsonp:
            # If we use jsonp, that's mean we are called from another host
            # Some browser (IE and Safari) do no allow third party cookies
            # We need then to manage http sessions manually.
            response['session_id'] = self.session_id
            mime = 'application/javascript'
            body = "%s(%s);" % (self.jsonp, simplejson.dumps(response),)
        else:
            mime = 'application/json'
            body = simplejson.dumps(response)

        return Response(
                    body, headers=[('Content-Type', mime),
                                   ('Content-Length', len(body))])

    def _handle_exception(self, exception):
        """Called within an except block to allow converting exceptions
           to arbitrary responses. Anything returned (except None) will
           be used as response."""
        try:
            return super(JsonRequest, self)._handle_exception(exception)
        except Exception:
            # user-facing warnings and expired sessions are expected and not
            # logged as server errors
            if not isinstance(exception, (openerp.exceptions.Warning, SessionExpiredException)):
                _logger.exception("Exception during JSON request handling.")
            error = {
                    'code': 200,
                    'message': "Odoo Server Error",
                    'data': serialize_exception(exception)
            }
            if isinstance(exception, AuthenticationError):
                error['code'] = 100
                error['message'] = "Odoo Session Invalid"
            if isinstance(exception, SessionExpiredException):
                error['code'] = 100
                error['message'] = "Odoo Session Expired"
            return self._json_response(error=error)

    def dispatch(self):
        # step-1 jsonp POSTs short-circuit: only store the payload
        if self.jsonp_handler:
            return self.jsonp_handler()
        try:
            # optional request/response debug logging with memory accounting
            rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
            rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
            if rpc_request_flag or rpc_response_flag:
                endpoint = self.endpoint.method.__name__
                model = self.params.get('model')
                method = self.params.get('method')
                args = self.params.get('args', [])

                start_time = time.time()
                _, start_vms = 0, 0
                if psutil:
                    _, start_vms = memory_info(psutil.Process(os.getpid()))
                if rpc_request and rpc_response_flag:
                    rpc_request.debug('%s: %s %s, %s',
                        endpoint, model, method, pprint.pformat(args))

            result = self._call_function(**self.params)

            if rpc_request_flag or rpc_response_flag:
                end_time = time.time()
                _, end_vms = 0, 0
                if psutil:
                    _, end_vms = memory_info(psutil.Process(os.getpid()))
                logline = '%s: %s %s: time:%.3fs mem: %sk -> %sk (diff: %sk)' % (
                    endpoint, model, method, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
                if rpc_response_flag:
                    rpc_response.debug('%s, %s', logline, pprint.pformat(result))
                else:
                    rpc_request.debug(logline)

            return self._json_response(result)
        except Exception, e:
            return self._handle_exception(e)
def serialize_exception(e):
    """Build the JSON-serializable payload describing exception *e*,
    used as the ``data`` member of a JSON-RPC error response."""
    module = type(e).__module__
    payload = {
        "name": module + "." + type(e).__name__ if module else type(e).__name__,
        "debug": traceback.format_exc(),
        "message": ustr(e),
        "arguments": to_jsonable(e.args),
    }
    # classify the exception for the client; branch order matters since
    # these openerp exception classes may be related by inheritance
    if isinstance(e, openerp.osv.osv.except_osv):
        payload["exception_type"] = "except_osv"
    elif isinstance(e, openerp.exceptions.Warning):
        payload["exception_type"] = "warning"
    elif isinstance(e, openerp.exceptions.AccessError):
        payload["exception_type"] = "access_error"
    elif isinstance(e, openerp.exceptions.AccessDenied):
        payload["exception_type"] = "access_denied"
    return payload
def to_jsonable(o):
    """Recursively convert *o* into something simplejson can serialize."""
    # JSON-representable scalars pass through untouched
    if o is None or isinstance(o, (str, unicode, int, long, bool, float)):
        return o
    # sequences become lists of converted elements
    if isinstance(o, (list, tuple)):
        return [to_jsonable(x) for x in o]
    # mappings keep converted values, with keys coerced to unicode text
    if isinstance(o, dict):
        return dict((u"%s" % k, to_jsonable(v)) for k, v in o.items())
    # anything else is stringified
    return ustr(o)
def jsonrequest(f):
    """
    .. deprecated:: 8.0
        Use the :func:`~openerp.http.route` decorator instead.
    """
    # v7 convention: the handler name is the route, 'index' maps to the root
    path = "" if f.__name__ == "index" else f.__name__.lstrip('/')
    return route([path, path + "/<path:_ignored_path>"], type="json", auth="user", combine=True)(f)
class HttpRequest(WebRequest):
    """ Handler for the ``http`` request type.

    matched routing parameters, query string parameters, form_ parameters
    and files are passed to the handler method as keyword arguments.

    In case of name conflict, routing parameters have priority.

    The handler method's result can be:

    * a falsy value, in which case the HTTP response will be an
      `HTTP 204`_ (No Content)
    * a werkzeug Response object, which is returned as-is
    * a ``str`` or ``unicode``, will be wrapped in a Response object and
      interpreted as HTML

    .. _form: http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.2
    .. _HTTP 204: http://tools.ietf.org/html/rfc7231#section-6.3.5
    """
    _request_type = "http"
    def __init__(self, *args):
        super(HttpRequest, self).__init__(*args)
        # merge query string, form and file parameters; later updates win,
        # so files override form values which override query-string values
        params = self.httprequest.args.to_dict()
        params.update(self.httprequest.form.to_dict())
        params.update(self.httprequest.files.to_dict())
        # session_id is transport plumbing, never a handler argument
        params.pop('session_id', None)
        self.params = params

    def _handle_exception(self, exception):
        """Called within an except block to allow converting exceptions
           to abitrary responses. Anything returned (except None) will
           be used as response."""
        try:
            return super(HttpRequest, self)._handle_exception(exception)
        except SessionExpiredException:
            # send the user back through the login screen, then to the page
            # originally requested (unless the caller opted out)
            if not request.params.get('noredirect'):
                query = werkzeug.urls.url_encode({
                    'redirect': request.httprequest.url,
                })
                return werkzeug.utils.redirect('/web/login?%s' % query)
        except werkzeug.exceptions.HTTPException, e:
            # HTTPExceptions are valid WSGI responses in their own right
            return e

    def dispatch(self):
        # answer CORS preflight requests for routes declaring a cors directive
        if request.httprequest.method == 'OPTIONS' and request.endpoint and request.endpoint.routing.get('cors'):
            headers = {
                'Access-Control-Max-Age': 60 * 60 * 24,
                'Access-Control-Allow-Headers': 'Origin, X-Requested-With, Content-Type, Accept'
            }
            return Response(status=200, headers=headers)

        r = self._call_function(**self.params)
        if not r:
            r = Response(status=204)  # no content
        return r

    def make_response(self, data, headers=None, cookies=None):
        """ Helper for non-HTML responses, or HTML responses with custom
        response headers or cookies.

        While handlers can just return the HTML markup of a page they want to
        send as a string if non-HTML data is returned they need to create a
        complete response object, or the returned data will not be correctly
        interpreted by the clients.

        :param basestring data: response body
        :param headers: HTTP headers to set on the response
        :type headers: ``[(name, value)]``
        :param collections.Mapping cookies: cookies to set on the client
        """
        response = Response(data, headers=headers)
        if cookies:
            for k, v in cookies.iteritems():
                response.set_cookie(k, v)
        return response

    def render(self, template, qcontext=None, lazy=True, **kw):
        """ Lazy render of a QWeb template.

        The actual rendering of the given template will occur at then end of
        the dispatching. Meanwhile, the template and/or qcontext can be
        altered or even replaced by a static response.

        :param basestring template: template to render
        :param dict qcontext: Rendering context to use
        :param bool lazy: whether the template rendering should be deferred
                          until the last possible moment
        :param kw: forwarded to werkzeug's Response object
        """
        response = Response(template=template, qcontext=qcontext, **kw)
        if not lazy:
            return response.render()
        return response

    def not_found(self, description=None):
        """ Shortcut for a `HTTP 404
        <http://tools.ietf.org/html/rfc7231#section-6.5.4>`_ (Not Found)
        response
        """
        return werkzeug.exceptions.NotFound(description)
def httprequest(f):
    """
    .. deprecated:: 8.0
        Use the :func:`~openerp.http.route` decorator instead.
    """
    # v7 convention: the handler name is the route, 'index' maps to the root
    path = "" if f.__name__ == "index" else f.__name__.lstrip('/')
    return route([path, path + "/<path:_ignored_path>"], type="http", auth="user", combine=True)(f)
#----------------------------------------------------------
# Controller and route registration
#----------------------------------------------------------
# populated elsewhere at addon-load time; keyed by addon/module name
addons_module = {}
addons_manifest = {}
# module name -> [(qualified class name, class)], filled by ControllerType
controllers_per_module = collections.defaultdict(list)
class ControllerType(type):
    """Metaclass of :class:`Controller`: propagates routing metadata to
    subclasses and records controller classes per addon module in
    ``controllers_per_module``."""
    def __init__(cls, name, bases, attrs):
        super(ControllerType, cls).__init__(name, bases, attrs)

        # flag old-style methods with req as first argument
        for k, v in attrs.items():
            if inspect.isfunction(v) and hasattr(v, 'original_func'):
                # Set routing type on original functions
                routing_type = v.routing.get('type')
                parent = [claz for claz in bases if isinstance(claz, ControllerType) and hasattr(claz, k)]
                parent_routing_type = getattr(parent[0], k).original_func.routing_type if parent else routing_type or 'http'
                # fixed: compare with != rather than `is not` -- these are
                # plain strings, and identity comparison only worked by
                # accident of CPython string interning
                if routing_type is not None and routing_type != parent_routing_type:
                    routing_type = parent_routing_type
                    _logger.warn("Subclass re-defines <function %s.%s.%s> with different type than original."
                                    " Will use original type: %r" % (cls.__module__, cls.__name__, k, parent_routing_type))
                v.original_func.routing_type = routing_type or parent_routing_type

                spec = inspect.getargspec(v.original_func)
                first_arg = spec.args[1] if len(spec.args) >= 2 else None
                if first_arg in ["req", "request"]:
                    v._first_arg_is_req = True

        # store the controller in the controllers list
        name_class = ("%s.%s" % (cls.__module__, cls.__name__), cls)
        class_path = name_class[0].split(".")
        if class_path[:2] != ["openerp", "addons"]:
            module = ""
        else:
            # we want to know all modules that have controllers
            module = class_path[2]
        # but we only store controllers directly inheriting from Controller
        if "Controller" not in globals() or Controller not in bases:
            return
        controllers_per_module[module].append(name_class)
class Controller(object):
    # Base class for all web controllers; the ControllerType metaclass
    # registers every direct subclass in controllers_per_module so that
    # routing_map() can later collect their routed methods.
    __metaclass__ = ControllerType
class EndPoint(object):
    """Callable wrapper pairing a controller method with its routing dict;
    used as the werkzeug Rule endpoint."""
    def __init__(self, method, routing):
        self.routing = routing
        self.arguments = {}
        self.method = method
        # keep a handle on the undecorated function when one exists
        self.original = getattr(method, 'original_func', method)

    @property
    def first_arg_is_req(self):
        # Backward for 7.0: old-style handlers took the request as first arg
        return getattr(self.method, '_first_arg_is_req', False)

    def __call__(self, *args, **kw):
        return self.method(*args, **kw)
def routing_map(modules, nodb_only, converters=None):
    """Build the werkzeug routing map for the controllers of *modules*.

    :param modules: addon names whose controllers should be mapped
    :param nodb_only: when True, only map routes with ``auth='none'``
        (routes usable before a database is selected)
    :param converters: optional custom werkzeug URL converters
    :returns: a :class:`werkzeug.routing.Map`
    """
    routing_map = werkzeug.routing.Map(strict_slashes=False, converters=converters)

    def get_subclasses(klass):
        # leaf subclasses of `klass` that live in one of the requested addons
        def valid(c):
            return c.__module__.startswith('openerp.addons.') and c.__module__.split(".")[2] in modules
        subclasses = klass.__subclasses__()
        result = []
        for subclass in subclasses:
            if valid(subclass):
                result.extend(get_subclasses(subclass))
        if not result and valid(klass):
            result = [klass]
        return result

    # order-preserving dedup (keyed on identity, classes aren't hashable-safe here)
    uniq = lambda it: collections.OrderedDict((id(x), x) for x in it).values()

    for module in modules:
        if module not in controllers_per_module:
            continue

        for _, cls in controllers_per_module[module]:
            # merge all addon-provided subclasses into one synthetic class so
            # overridden handlers win over the base controller's
            subclasses = uniq(c for c in get_subclasses(cls) if c is not cls)
            if subclasses:
                name = "%s (extended by %s)" % (cls.__name__, ', '.join(sub.__name__ for sub in subclasses))
                cls = type(name, tuple(reversed(subclasses)), {})

            o = cls()
            members = inspect.getmembers(o, inspect.ismethod)
            for _, mv in members:
                if hasattr(mv, 'routing'):
                    routing = dict(type='http', auth='user', methods=None, routes=None)
                    methods_done = list()
                    # update routing attributes from subclasses(auth, methods...)
                    for claz in reversed(mv.im_class.mro()):
                        fn = getattr(claz, mv.func_name, None)
                        if fn and hasattr(fn, 'routing') and fn not in methods_done:
                            methods_done.append(fn)
                            routing.update(fn.routing)
                    if not nodb_only or routing['auth'] == "none":
                        assert routing['routes'], "Method %r has not route defined" % mv
                        endpoint = EndPoint(mv, routing)
                        for url in routing['routes']:
                            if routing.get("combine", False):
                                # deprecated v7 declaration
                                url = o._cp_path.rstrip('/') + '/' + url.lstrip('/')
                                if url.endswith("/") and len(url) > 1:
                                    url = url[: -1]
                            # forward werkzeug Rule options declared on the route
                            xtra_keys = 'defaults subdomain build_only strict_slashes redirect_to alias host'.split()
                            kw = {k: routing[k] for k in xtra_keys if k in routing}
                            routing_map.add(werkzeug.routing.Rule(url, endpoint=endpoint, methods=routing['methods'], **kw))
    return routing_map
#----------------------------------------------------------
# HTTP Sessions
#----------------------------------------------------------
class AuthenticationError(Exception):
    # Raised when authentication fails; mapped by JsonRequest._handle_exception
    # to the JSON-RPC error code 100 "Odoo Session Invalid".
    pass
class SessionExpiredException(Exception):
    # Raised by OpenERPSession.check_security when the session no longer holds
    # valid credentials; http requests redirect to /web/login, json requests
    # answer with error code 100 "Odoo Session Expired".
    pass
class Service(object):
    """
    .. deprecated:: 8.0
        Use :func:`dispatch_rpc` instead.
    """
    def __init__(self, session, service_name):
        self.service_name = service_name
        self.session = session

    def __getattr__(self, method):
        # every attribute access yields a callable performing the RPC call
        # `<service_name>.<method>(*args)` through dispatch_rpc
        def proxy_method(*args):
            return dispatch_rpc(self.service_name, method, args)
        return proxy_method
class Model(object):
    """
    .. deprecated:: 8.0
        Use the registry and cursor in :data:`request` instead.
    """
    def __init__(self, session, model):
        self.session = session
        self.model = model
        self.proxy = self.session.proxy('object')

    def __getattr__(self, method):
        self.session.assert_valid()
        def proxy(*args, **kw):
            # Can't provide any retro-compatibility for this case, so we check it and raise an Exception
            # to tell the programmer to adapt his code
            if not request.db or not request.uid or self.session.db != request.db \
                or self.session.uid != request.uid:
                raise Exception("Trying to use Model with badly configured database or user.")

            # private (underscore-prefixed) model methods are never exposed
            if method.startswith('_'):
                raise Exception("Access denied")
            mod = request.registry[self.model]
            meth = getattr(mod, method)
            # make sure to instantiate an environment
            cr = request.env.cr
            result = meth(cr, request.uid, *args, **kw)
            # reorder read: read() may return records in arbitrary order, so
            # restore the id order the caller asked for (args[0])
            if method == "read":
                if isinstance(result, list) and len(result) > 0 and "id" in result[0]:
                    index = {}
                    for r in result:
                        index[r['id']] = r
                    result = [index[x] for x in args[0] if x in index]
            return result
        return proxy
class OpenERPSession(werkzeug.contrib.sessions.Session):
    def __init__(self, *args, **kwargs):
        # `inited` must be set BEFORE the parent constructor runs: the custom
        # __setattr__ below only starts routing unknown attributes into the
        # session dict once `inited` is True.
        self.inited = False
        self.modified = False
        self.rotate = False
        super(OpenERPSession, self).__init__(*args, **kwargs)
        self.inited = True
        self._default_values()
        # seeding the defaults must not mark the session as dirty
        self.modified = False
    def __getattr__(self, attr):
        # expose session keys as attributes; missing keys read as None
        return self.get(attr, None)
def __setattr__(self, k, v):
if getattr(self, "inited", False):
try:
object.__getattribute__(self, k)
except:
return self.__setitem__(k, v)
object.__setattr__(self, k, v)
def authenticate(self, db, login=None, password=None, uid=None):
"""
Authenticate the current user with the given db, login and
password. If successful, store the authentication parameters in the
current session and request.
:param uid: If not None, that user id will be used instead the login
to authenticate the user.
"""
if uid is None:
wsgienv = request.httprequest.environ
env = dict(
base_location=request.httprequest.url_root.rstrip('/'),
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
uid = dispatch_rpc('common', 'authenticate', [db, login, password, env])
else:
security.check(db, uid, password)
self.db = db
self.uid = uid
self.login = login
self.password = password
request.uid = uid
request.disable_db = False
if uid: self.get_context()
return uid
def check_security(self):
"""
Check the current authentication parameters to know if those are still
valid. This method should be called at each request. If the
authentication fails, a :exc:`SessionExpiredException` is raised.
"""
if not self.db or not self.uid:
raise SessionExpiredException("Session expired")
security.check(self.db, self.uid, self.password)
def logout(self, keep_db=False):
for k in self.keys():
if not (keep_db and k == 'db'):
del self[k]
self._default_values()
self.rotate = True
def _default_values(self):
self.setdefault("db", None)
self.setdefault("uid", None)
self.setdefault("login", None)
self.setdefault("password", None)
self.setdefault("context", {})
def get_context(self):
"""
Re-initializes the current user's session context (based on his
preferences) by calling res.users.get_context() with the old context.
:returns: the new context
"""
assert self.uid, "The user needs to be logged-in to initialize his context"
self.context = request.registry.get('res.users').context_get(request.cr, request.uid) or {}
self.context['uid'] = self.uid
self._fix_lang(self.context)
return self.context
def _fix_lang(self, context):
""" OpenERP provides languages which may not make sense and/or may not
be understood by the web client's libraries.
Fix those here.
:param dict context: context to fix
"""
lang = context.get('lang')
# inane OpenERP locale
if lang == 'ar_AR':
lang = 'ar'
# lang to lang_REGION (datejs only handles lang_REGION, no bare langs)
if lang in babel.core.LOCALE_ALIASES:
lang = babel.core.LOCALE_ALIASES[lang]
context['lang'] = lang or 'en_US'
# Deprecated to be removed in 9
"""
Damn properties for retro-compatibility. All of that is deprecated,
all of that.
"""
@property
def _db(self):
return self.db
@_db.setter
def _db(self, value):
self.db = value
@property
def _uid(self):
return self.uid
@_uid.setter
def _uid(self, value):
self.uid = value
@property
def _login(self):
return self.login
@_login.setter
def _login(self, value):
self.login = value
@property
def _password(self):
return self.password
@_password.setter
def _password(self, value):
self.password = value
def send(self, service_name, method, *args):
"""
.. deprecated:: 8.0
Use :func:`dispatch_rpc` instead.
"""
return dispatch_rpc(service_name, method, args)
def proxy(self, service):
"""
.. deprecated:: 8.0
Use :func:`dispatch_rpc` instead.
"""
return Service(self, service)
def assert_valid(self, force=False):
"""
.. deprecated:: 8.0
Use :meth:`check_security` instead.
Ensures this session is valid (logged into the openerp server)
"""
if self.uid and not force:
return
# TODO use authenticate instead of login
self.uid = self.proxy("common").login(self.db, self.login, self.password)
if not self.uid:
raise AuthenticationError("Authentication failure")
def ensure_valid(self):
"""
.. deprecated:: 8.0
Use :meth:`check_security` instead.
"""
if self.uid:
try:
self.assert_valid(True)
except Exception:
self.uid = None
def execute(self, model, func, *l, **d):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
"""
model = self.model(model)
r = getattr(model, func)(*l, **d)
return r
def exec_workflow(self, model, id, signal):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
"""
self.assert_valid()
r = self.proxy('object').exec_workflow(self.db, self.uid, self.password, model, signal, id)
return r
def model(self, model):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
Get an RPC proxy for the object ``model``, bound to this session.
:param model: an OpenERP model name
:type model: str
:rtype: a model object
"""
if not self.db:
raise SessionExpiredException("Session expired")
return Model(self, model)
def save_action(self, action):
"""
This method store an action object in the session and returns an integer
identifying that action. The method get_action() can be used to get
back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
saved_actions = self.setdefault('saved_actions', {"next": 1, "actions": {}})
# we don't allow more than 10 stored actions
if len(saved_actions["actions"]) >= 10:
del saved_actions["actions"][min(saved_actions["actions"])]
key = saved_actions["next"]
saved_actions["actions"][key] = action
saved_actions["next"] = key + 1
self.modified = True
return key
def get_action(self, key):
"""
Gets back a previously saved action. This method can return None if the action
was saved since too much time (this case should be handled in a smart way).
:param key: The key given by save_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
saved_actions = self.get('saved_actions', {})
return saved_actions.get("actions", {}).get(key)
def session_gc(session_store):
    """Probabilistically purge session files older than one week.

    The sweep runs on roughly 0.1% of calls so garbage collection
    piggy-backs on normal request handling without a dedicated cron.
    """
    if random.random() >= 0.001:
        return
    # Sessions are kept for one week.
    cutoff = time.time() - 7 * 24 * 60 * 60
    for entry in os.listdir(session_store.path):
        full_path = os.path.join(session_store.path, entry)
        try:
            if os.path.getmtime(full_path) < cutoff:
                os.unlink(full_path)
        except OSError:
            # File vanished or is unreadable: best-effort, skip it.
            pass
#----------------------------------------------------------
# WSGI Layer
#----------------------------------------------------------
# Add potentially missing (older ubuntu) font mime types
# (an outdated system mimetypes database would otherwise cause static
# web-font files to be served as application/octet-stream)
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-ttf', '.ttf')
class Response(werkzeug.wrappers.Response):
    """ Response object passed through controller route chain.
    In addition to the :class:`werkzeug.wrappers.Response` parameters, this
    class's constructor can take the following additional parameters
    for QWeb Lazy Rendering.
    :param basestring template: template to render
    :param dict qcontext: Rendering context to use
    :param int uid: User id to use for the ir.ui.view render call,
                    ``None`` to use the request's user (the default)
    these attributes are available as parameters on the Response object and
    can be altered at any time before rendering
    Also exposes all the attributes and methods of
    :class:`werkzeug.wrappers.Response`.
    """
    default_mimetype = 'text/html'
    def __init__(self, *args, **kw):
        # Pop the QWeb-specific kwargs before delegating to werkzeug.
        template = kw.pop('template', None)
        qcontext = kw.pop('qcontext', None)
        uid = kw.pop('uid', None)
        super(Response, self).__init__(*args, **kw)
        self.set_default(template, qcontext, uid)
    def set_default(self, template=None, qcontext=None, uid=None):
        # Record (or reset) the lazy-rendering parameters.
        self.template = template
        self.qcontext = qcontext or dict()
        self.uid = uid
        # Support for Cross-Origin Resource Sharing
        if request.endpoint and 'cors' in request.endpoint.routing:
            self.headers.set('Access-Control-Allow-Origin', request.endpoint.routing['cors'])
            methods = 'GET, POST'
            if request.endpoint.routing['type'] == 'json':
                methods = 'POST'
            elif request.endpoint.routing.get('methods'):
                methods = ', '.join(request.endpoint.routing['methods'])
            self.headers.set('Access-Control-Allow-Methods', methods)
    @property
    def is_qweb(self):
        # True while a template is still pending lazy rendering.
        return self.template is not None
    def render(self):
        """ Renders the Response's template, returns the result
        """
        view_obj = request.registry["ir.ui.view"]
        # Render as the explicit uid, else the request's user, else superuser.
        uid = self.uid or request.uid or openerp.SUPERUSER_ID
        return view_obj.render(
            request.cr, uid, self.template, self.qcontext,
            context=request.context)
    def flatten(self):
        """ Forces the rendering of the response's template, sets the result
        as response body and unsets :attr:`.template`
        """
        # self.response is werkzeug's application iterable (the body parts).
        self.response.append(self.render())
        self.template = None
class DisableCacheMiddleware(object):
    """WSGI middleware stripping caching headers when the referer's query
    string contains 'debug' (and always dropping Last-Modified)."""
    def __init__(self, app):
        self.app = app
    def __call__(self, environ, start_response):
        def start_wrapped(status, headers):
            referer = environ.get('HTTP_REFERER', '')
            query = urlparse.urlparse(referer).query
            extra_headers = []
            blacklist = ['Last-Modified']
            if query.count('debug') >= 1:
                # Debug mode: force revalidation and drop all caching hints.
                extra_headers = [('Cache-Control', 'no-cache')]
                blacklist += ['Expires', 'Etag', 'Cache-Control']
            kept = [(name, value) for name, value in headers if name not in blacklist]
            start_response(status, extra_headers + kept)
        return self.app(environ, start_wrapped)
class Root(object):
    """Root WSGI application for the OpenERP Web Client.
    """
    def __init__(self):
        # Addons are loaded lazily on the first request (see __call__).
        self._loaded = False
    @lazy_property
    def session_store(self):
        # Setup http sessions
        path = openerp.tools.config.session_dir
        _logger.debug('HTTP sessions stored in: %s', path)
        return werkzeug.contrib.sessions.FilesystemSessionStore(path, session_class=OpenERPSession)
    @lazy_property
    def nodb_routing_map(self):
        # Routing map used while no database is selected.
        _logger.info("Generating nondb routing")
        return routing_map([''] + openerp.conf.server_wide_modules, True)
    def __call__(self, environ, start_response):
        """ Handle a WSGI request
        """
        if not self._loaded:
            self._loaded = True
            self.load_addons()
        return self.dispatch(environ, start_response)
    def load_addons(self):
        """ Load all addons from addons path containing static files and
        controllers and configure them.  """
        # TODO should we move this to ir.http so that only configured modules are served ?
        statics = {}
        for addons_path in openerp.modules.module.ad_paths:
            for module in sorted(os.listdir(str(addons_path))):
                if module not in addons_module:
                    manifest_path = os.path.join(addons_path, module, '__openerp__.py')
                    path_static = os.path.join(addons_path, module, 'static')
                    if os.path.isfile(manifest_path) and os.path.isdir(path_static):
                        manifest = ast.literal_eval(open(manifest_path).read())
                        if not manifest.get('installable', True):
                            continue
                        manifest['addons_path'] = addons_path
                        _logger.debug("Loading %s", module)
                        if 'openerp.addons' in sys.modules:
                            m = __import__('openerp.addons.' + module)
                        else:
                            m = None
                        addons_module[module] = m
                        addons_manifest[module] = manifest
                        statics['/%s/static' % module] = path_static
        if statics:
            _logger.info("HTTP Configuring static files")
        # Wrap the dispatcher so /<module>/static/* is served directly.
        app = werkzeug.wsgi.SharedDataMiddleware(self.dispatch, statics, cache_timeout=STATIC_CACHE)
        self.dispatch = DisableCacheMiddleware(app)
    def setup_session(self, httprequest):
        # recover or create session
        session_gc(self.session_store)
        # Session id lookup order: GET parameter, custom header, cookie.
        sid = httprequest.args.get('session_id')
        explicit_session = True
        if not sid:
            sid = httprequest.headers.get("X-Openerp-Session-Id")
        if not sid:
            sid = httprequest.cookies.get('session_id')
            explicit_session = False
        if sid is None:
            httprequest.session = self.session_store.new()
        else:
            httprequest.session = self.session_store.get(sid)
        return explicit_session
    def setup_db(self, httprequest):
        db = httprequest.session.db
        # Check if session.db is legit
        if db:
            if db not in db_filter([db], httprequest=httprequest):
                _logger.warn("Logged into database '%s', but dbfilter "
                             "rejects it; logging session out.", db)
                httprequest.session.logout()
                db = None
        if not db:
            httprequest.session.db = db_monodb(httprequest)
    def setup_lang(self, httprequest):
        # Default the session language from the Accept-Language header.
        if not "lang" in httprequest.session.context:
            lang = httprequest.accept_languages.best or "en_US"
            lang = babel.core.LOCALE_ALIASES.get(lang, lang).replace('-', '_')
            httprequest.session.context["lang"] = lang
    def get_request(self, httprequest):
        # deduce type of request
        if httprequest.args.get('jsonp'):
            return JsonRequest(httprequest)
        if httprequest.mimetype in ("application/json", "application/json-rpc"):
            return JsonRequest(httprequest)
        else:
            return HttpRequest(httprequest)
    def get_response(self, httprequest, result, explicit_session):
        # Render any pending QWeb template before returning the response.
        if isinstance(result, Response) and result.is_qweb:
            try:
                result.flatten()
            except(Exception), e:
                if request.db:
                    result = request.registry['ir.http']._handle_exception(e)
                else:
                    raise
        if isinstance(result, basestring):
            response = Response(result, mimetype='text/html')
        else:
            response = result
        if httprequest.session.should_save:
            if httprequest.session.rotate:
                # logout() requested a fresh session id (anti-fixation).
                self.session_store.delete(httprequest.session)
                httprequest.session.sid = self.session_store.generate_key()
                httprequest.session.modified = True
            self.session_store.save(httprequest.session)
        # We must not set the cookie if the session id was specified using a http header or a GET parameter.
        # There are two reasons to this:
        # - When using one of those two means we consider that we are overriding the cookie, which means creating a new
        #   session on top of an already existing session and we don't want to create a mess with the 'normal' session
        #   (the one using the cookie). That is a special feature of the Session Javascript class.
        # - It could allow session fixation attacks.
        if not explicit_session and hasattr(response, 'set_cookie'):
            response.set_cookie('session_id', httprequest.session.sid, max_age=90 * 24 * 60 * 60)
        return response
    def dispatch(self, environ, start_response):
        """
        Performs the actual WSGI dispatching for the application.
        """
        try:
            httprequest = werkzeug.wrappers.Request(environ)
            httprequest.app = self
            explicit_session = self.setup_session(httprequest)
            self.setup_db(httprequest)
            self.setup_lang(httprequest)
            # NOTE: this local deliberately shadows the module-level 'request'
            # proxy for the rest of this method.
            request = self.get_request(httprequest)
            def _dispatch_nodb():
                # Route through the database-less map (login screen, db manager...).
                try:
                    func, arguments = self.nodb_routing_map.bind_to_environ(request.httprequest.environ).match()
                except werkzeug.exceptions.HTTPException, e:
                    return request._handle_exception(e)
                request.set_handler(func, arguments, "none")
                result = request.dispatch()
                return result
            with request:
                db = request.session.db
                if db:
                    openerp.modules.registry.RegistryManager.check_registry_signaling(db)
                    try:
                        with openerp.tools.mute_logger('openerp.sql_db'):
                            ir_http = request.registry['ir.http']
                    except (AttributeError, psycopg2.OperationalError):
                        # psycopg2 error or attribute error while constructing
                        # the registry. That means the database probably does
                        # not exists anymore or the code doesnt match the db.
                        # Log the user out and fall back to nodb
                        request.session.logout()
                        result = _dispatch_nodb()
                    else:
                        result = ir_http._dispatch()
                        openerp.modules.registry.RegistryManager.signal_caches_change(db)
                else:
                    result = _dispatch_nodb()
                response = self.get_response(httprequest, result, explicit_session)
            return response(environ, start_response)
        except werkzeug.exceptions.HTTPException, e:
            # Let werkzeug's HTTP exceptions render themselves as responses.
            return e(environ, start_response)
    def get_db_router(self, db):
        # With no db, only the static nodb routes exist.
        if not db:
            return self.nodb_routing_map
        return request.registry['ir.http'].routing_map()
def db_list(force=False, httprequest=None):
    """Return the server's database list, filtered through the dbfilter."""
    all_dbs = dispatch_rpc("db", "list", [force])
    return db_filter(all_dbs, httprequest=httprequest)
def db_filter(dbs, httprequest=None):
    """Filter *dbs* with the configured ``dbfilter`` regular expression.

    In the filter, ``%h`` expands to the request host name and ``%d`` to its
    first domain component (skipping a leading ``www.``).
    """
    httprequest = httprequest or request.httprequest
    host = httprequest.environ.get('HTTP_HOST', '').split(':')[0]
    domain, _, remainder = host.partition('.')
    if domain == "www" and remainder:
        domain = remainder.partition('.')[0]
    pattern = openerp.tools.config['dbfilter'].replace('%h', host).replace('%d', domain)
    return [db for db in dbs if re.match(pattern, db)]
def db_monodb(httprequest=None):
    """
    Magic function to find the current database.
    Implementation details:
    * Magic
    * More magic
    Returns ``None`` if the magic is not magic enough.
    """
    httprequest = httprequest or request.httprequest
    candidates = db_list(True, httprequest)
    # Prefer the database already bound to the session, when still valid.
    session_db = httprequest.session.db
    if session_db in candidates:
        return session_db
    # A single candidate leaves no ambiguity.
    if len(candidates) == 1:
        return candidates[0]
    return None
def send_file(filepath_or_fp, mimetype=None, as_attachment=False, filename=None, mtime=None,
              add_etags=True, cache_timeout=STATIC_CACHE, conditional=True):
    """This is a modified version of Flask's send_file()
    Sends the contents of a file to the client. This will use the
    most efficient method available and configured. By default it will
    try to use the WSGI server's file_wrapper support.
    By default it will try to guess the mimetype for you, but you can
    also explicitly provide one. For extra security you probably want
    to send certain files as attachment (HTML for instance). The mimetype
    guessing requires a `filename` or an `attachment_filename` to be
    provided.
    Please never pass filenames to this function from user sources without
    checking them first.
    :param filepath_or_fp: the filename of the file to send.
                           Alternatively a file object might be provided
                           in which case `X-Sendfile` might not work and
                           fall back to the traditional method. Make sure
                           that the file pointer is positioned at the start
                           of data to send before calling :func:`send_file`.
    :param mimetype: the mimetype of the file if provided, otherwise
                     auto detection happens.
    :param as_attachment: set to `True` if you want to send this file with
                          a ``Content-Disposition: attachment`` header.
    :param filename: the filename for the attachment if it differs from the file's filename or
                     if using file object without 'name' attribute (eg: E-tags with StringIO).
    :param mtime: last modification time to use for conditional response.
    :param add_etags: set to `False` to disable attaching of etags.
    :param conditional: set to `False` to disable conditional responses.
    :param cache_timeout: the timeout in seconds for the headers.
    """
    if isinstance(filepath_or_fp, (str, unicode)):
        if not filename:
            filename = os.path.basename(filepath_or_fp)
        file = open(filepath_or_fp, 'rb')
        if not mtime:
            mtime = os.path.getmtime(filepath_or_fp)
    else:
        file = filepath_or_fp
        if not filename:
            filename = getattr(file, 'name', None)
    # Determine total size by seeking to the end, then rewind.
    file.seek(0, 2)
    size = file.tell()
    file.seek(0)
    if mimetype is None and filename:
        mimetype = mimetypes.guess_type(filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'
    headers = werkzeug.datastructures.Headers()
    if as_attachment:
        if filename is None:
            raise TypeError('filename unavailable, required for sending as attachment')
        headers.add('Content-Disposition', 'attachment', filename=filename)
        headers['Content-Length'] = size
    # Let the WSGI server stream the file (sendfile when supported).
    data = wrap_file(request.httprequest.environ, file)
    rv = Response(data, mimetype=mimetype, headers=headers,
                  direct_passthrough=True)
    # An mtime given as a server-format datetime string is parsed here;
    # unparseable values simply disable Last-Modified.
    if isinstance(mtime, str):
        try:
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            mtime = datetime.datetime.strptime(mtime.split('.')[0], server_format)
        except Exception:
            mtime = None
    if mtime is not None:
        rv.last_modified = mtime
    rv.cache_control.public = True
    if cache_timeout:
        rv.cache_control.max_age = cache_timeout
        rv.expires = int(time.time() + cache_timeout)
    if add_etags and filename and mtime:
        # Etag combines mtime, size and an adler32 of the filename.
        rv.set_etag('odoo-%s-%s-%s' % (
            mtime,
            size,
            adler32(
                filename.encode('utf-8') if isinstance(filename, unicode)
                else filename
            ) & 0xffffffff
        ))
    if conditional:
        rv = rv.make_conditional(request.httprequest)
        # make sure we don't send x-sendfile for servers that
        # ignore the 304 status code for x-sendfile.
        if rv.status_code == 304:
            rv.headers.pop('x-sendfile', None)
    return rv
#----------------------------------------------------------
# RPC controller
#----------------------------------------------------------
class CommonController(Controller):
    """Database-agnostic RPC endpoints (served with auth="none")."""
    @route('/jsonrpc', type='json', auth="none")
    def jsonrpc(self, service, method, args):
        """ Method used by client APIs to contact OpenERP. """
        return dispatch_rpc(service, method, args)
    @route('/gen_session_id', type='json', auth="none")
    def gen_session_id(self):
        # Create (but do not persist) a fresh session just to hand out a sid.
        nsession = root.session_store.new()
        return nsession.sid
# register main wsgi handler
# A single process-wide Root instance serves as the WSGI entry point.
root = Root()
openerp.service.wsgi_server.register_wsgi_handler(root)
# vim:et:ts=4:sw=4:
|
gbaman/python-sense-hat | refs/heads/master | setup.py | 4 | import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    The original implementation leaked the file handle; a ``with`` block
    guarantees it is closed even if reading fails.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata; long_description is read from README.rst at build time.
setup(
    name="sense-hat",
    version="2.1.0",
    author="Dave Honess",
    author_email="dave@raspberrypi.org",
    description="Python module to control the Raspberry Pi Sense HAT used in the Astro Pi mission",
    long_description=read('README.rst'),
    license="BSD",
    keywords=[
        "sense hat",
        "raspberrypi",
        "astro pi",
    ],
    url="https://github.com/RPi-Distro/python-sense-hat",
    packages=find_packages(),
    package_data={
        # NOTE(review): package_data keys are normally *package names*, not
        # file kinds -- "txt"/"png" look suspicious; data files are probably
        # actually shipped via include_package_data below. Confirm.
        "txt": ['sense_hat_text.txt'],
        "png": ['sense_hat_text.png']
    },
    include_package_data=True,
    install_requires=[
        "pillow",
        "numpy"
    ],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Astronomy",
        "Topic :: Scientific/Engineering :: Atmospheric Science",
        "Topic :: Education",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
    ],
)
|
kencoken/imsearch-tools | refs/heads/master | imsearchtools/postproc_modules/example_textlog_module.py | 2 | #!/usr/bin/env python
import json
def callback_func(out_dict, extra_prms=None):
    """Append *out_dict* (and optional *extra_prms*) as labelled JSON to test.txt."""
    chunks = ['\n', 'OUT_DICT:\n', json.dumps(out_dict)]
    if extra_prms:
        chunks.append('\nEXTRA_PRMS:\n')
        chunks.append(json.dumps(extra_prms))
    chunks.append('\n')
    with open('test.txt', 'a') as log_file:
        log_file.write(''.join(chunks))
|
PSUdaemon/trafficserver | refs/heads/master | tests/gold_tests/h2/h2bigclient.py | 10 | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from hyper import HTTPConnection
import hyper
import argparse
def getResponseString(response):
    """Format *response*'s status line plus its date and server headers.

    The protocol label is chosen by sniffing the response's class name:
    hyper's HTTP/2 response types contain 'HTTP20'.
    """
    proto = "HTTP/2" if str(type(response)).find('HTTP20') != -1 else "HTTP"
    parts = ["{0} {1}\r\n".format(proto, response.status)]
    parts.append('date: ' + response.headers.get('date')[0].decode('utf-8') + "\r\n")
    parts.append('server: ' + response.headers.get('Server')[0].decode('utf-8') + "\r\n")
    return ''.join(parts)
def makerequest(port):
    """Fetch /bigfile twice over HTTP/2 and sanity-check the body.

    The body is expected to start with one 'a' followed only by 'b's;
    results are printed rather than returned.
    """
    # Disable TLS verification: the test server uses a self-signed cert.
    hyper.tls._context = hyper.tls.init_context()
    hyper.tls._context.check_hostname = False
    hyper.tls._context.verify_mode = hyper.compat.ssl.CERT_NONE
    conn = HTTPConnection('localhost:{0}'.format(port), secure=True)
    # Fetch the object twice so we know at least one time comes from cache
    # Exploring timing options
    sites = ['/bigfile', '/bigfile']
    # NOTE(review): 'responses' is never used below.
    responses = []
    request_ids = []
    for site in sites:
        request_id = conn.request('GET', url=site)
        request_ids.append(request_id)
    # get responses
    for req_id in request_ids:
        response = conn.get_response(req_id)
        body = response.read()
        cl = response.headers.get('Content-Length')[0]
        print("Content length = {}\r\n".format(int(cl)))
        print("Body length = {}\r\n".format(len(body)))
        error = 0
        # First byte must be 'a', every following byte must be 'b'.
        if chr(body[0]) != 'a':
            error = 1
            print("First char {}".format(body[0]))
        i = 1
        while i < len(body) and not error:
            error = chr(body[i]) != 'b'
            if error:
                print("bad char {} at {}".format(body[i], i))
            i = i + 1
        if not error:
            print("Content success\r\n")
        else:
            print("Content fail\r\n")
def main():
    """Parse the --port argument and run the HTTP/2 request sequence."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--port", "-p",
                            type=int,
                            help="Port to use")
    options = arg_parser.parse_args()
    makerequest(options.port)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
ppiotr/Bibedit-some-refactoring | refs/heads/bibedit-hp-change-to-field-with-many-instances | modules/bibformat/lib/elements/bfe_report_numbers.py | 4 | # -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints report numbers
"""
__revision__ = ""
def format(bfo, limit, separator=" ", extension=" etc."):
    """
    Prints the report numbers of the record (037__a and 088__a)
    @param separator: the separator between report numbers.
    @param limit: the max number of report numbers to print
    @param extension: a suffix printed when the list was truncated
    """
    # NOTE: the name 'format' shadows the builtin but is mandated by the
    # BibFormat element API.
    numbers = bfo.fields("037__a")
    numbers.extend(bfo.fields("088__a"))
    if limit.isdigit():
        limit_as_int = int(limit)
        # Only truncate (and flag with the extension) when there really are
        # more numbers than the limit. Previously 'limit == len(numbers)'
        # also appended the extension even though nothing was cut off.
        if limit_as_int < len(numbers):
            return separator.join(numbers[:limit_as_int]) + extension
    return separator.join(numbers)
|
popazerty/blackhole-vuplus | refs/heads/master | lib/python/Screens/AudioSelection.py | 15 | from Screen import Screen
from Screens.Setup import getConfigMenuItem, Setup
from Screens.InputBox import PinInput
from Screens.MessageBox import MessageBox
from Components.ServiceEventTracker import ServiceEventTracker
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Components.ChoiceList import ChoiceList, ChoiceEntryComponent
from Components.config import config, ConfigSubsection, getConfigListEntry, ConfigNothing, ConfigSelection, ConfigOnOff
from Components.Label import Label
from Components.MultiContent import MultiContentEntryText
from Components.Sources.List import List
from Components.Sources.Boolean import Boolean
from Components.SystemInfo import SystemInfo
from Components.VolumeControl import VolumeControl
from enigma import iPlayableService, eTimer, eSize
from Tools.ISO639 import LanguageCodes
from Tools.BoundFunction import boundFunction
# Which pane of the dialog currently has input focus.
FOCUS_CONFIG, FOCUS_STREAMS = range(2)
# Identifiers for the two pages of the dialog.
[PAGE_AUDIO, PAGE_SUBTITLES] = ["audio", "subtitles"]
class AudioSelection(Screen, ConfigListScreen):
	def __init__(self, session, infobar=None, page=PAGE_AUDIO):
		"""Build the audio/subtitle selection dialog.

		:param infobar: owning infobar; defaults to the session's infobar
		:param page: initial page, PAGE_AUDIO or PAGE_SUBTITLES
		"""
		Screen.__init__(self, session)
		self["streams"] = List([], enableWrapAround=True)
		self["key_red"] = Boolean(False)
		self["key_green"] = Boolean(False)
		self["key_yellow"] = Boolean(True)
		self["key_blue"] = Boolean(False)
		self.protectContextMenu = True
		ConfigListScreen.__init__(self, [])
		self.infobar = infobar or self.session.infobar
		# Rebuild the lists whenever the playing service's info changes.
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evUpdatedInfo: self.__updatedInfo
			})
		self.cached_subtitle_checked = False
		self.__selected_subtitle = None
		self["actions"] = NumberActionMap(["AudioSelectionActions", "SetupActions", "DirectionActions", "MenuActions"],
		{
			"red": self.keyRed,
			"green": self.keyGreen,
			"yellow": self.keyYellow,
			"blue": self.keyBlue,
			"ok": self.keyOk,
			"cancel": self.cancel,
			"up": self.keyUp,
			"down": self.keyDown,
			"volumeUp": self.volumeUp,
			"volumeDown": self.volumeDown,
			"volumeMute": self.volumeMute,
			"menu": self.openAutoLanguageSetup,
			"1": self.keyNumberGlobal,
			"2": self.keyNumberGlobal,
			"3": self.keyNumberGlobal,
			"4": self.keyNumberGlobal,
			"5": self.keyNumberGlobal,
			"6": self.keyNumberGlobal,
			"7": self.keyNumberGlobal,
			"8": self.keyNumberGlobal,
			"9": self.keyNumberGlobal,
		}, -2)
		self.settings = ConfigSubsection()
		choicelist = [(PAGE_AUDIO,""), (PAGE_SUBTITLES,"")]
		self.settings.menupage = ConfigSelection(choices = choicelist, default=page)
		self.onLayoutFinish.append(self.__layoutFinished)
	def __layoutFinished(self):
		# Disable the config-list cursor until focus moves there.
		self["config"].instance.setSelectionEnable(False)
		self.focus = FOCUS_STREAMS
		# The notifier fires immediately, triggering the initial fillList().
		self.settings.menupage.addNotifier(self.fillList)
	def fillList(self, arg=None):
		"""Rebuild the config entries and the stream list for the active page
		(audio tracks or subtitles) and update the colour-key booleans."""
		streams = []
		conflist = []
		selectedidx = 0
		self["key_blue"].setBoolean(False)
		service = self.session.nav.getCurrentService()
		self.audioTracks = audio = service and service.audioTracks()
		n = audio and audio.getNumberOfTracks() or 0
		subtitlelist = self.getSubtitleList()
		if self.settings.menupage.getValue() == PAGE_AUDIO:
			self.setTitle(_("Select audio track"))
			service = self.session.nav.getCurrentService()
			self.audioTracks = audio = service and service.audioTracks()
			n = audio and audio.getNumberOfTracks() or 0
			if SystemInfo["CanDownmixAC3"]:
				self.settings.downmix = ConfigOnOff(default=config.av.downmix_ac3.value)
				self.settings.downmix.addNotifier(self.changeAC3Downmix, initial_call = False)
				conflist.append(getConfigListEntry(_("Multi channel downmix"), self.settings.downmix))
				self["key_red"].setBoolean(True)
			if n > 0:
				self.audioChannel = service.audioChannel()
				if self.audioChannel:
					choicelist = [("0",_("left")), ("1",_("stereo")), ("2", _("right"))]
					self.settings.channelmode = ConfigSelection(choices = choicelist, default = str(self.audioChannel.getCurrentChannel()))
					self.settings.channelmode.addNotifier(self.changeMode, initial_call = False)
					conflist.append(getConfigListEntry(_("Channel"), self.settings.channelmode))
					self["key_green"].setBoolean(True)
				else:
					conflist.append(('',))
					self["key_green"].setBoolean(False)
				selectedAudio = self.audioTracks.getCurrentTrack()
				# One list row per audio track: number, codec, language(s).
				for x in range(n):
					number = str(x + 1)
					i = audio.getTrackInfo(x)
					languages = i.getLanguage().split('/')
					description = i.getDescription() or ""
					selected = ""
					language = ""
					if selectedAudio == x:
						selected = "X"
						selectedidx = x
					cnt = 0
					for lang in languages:
						if cnt:
							language += ' / '
						if LanguageCodes.has_key(lang):
							language += _(LanguageCodes[lang][0])
						elif lang == "und":
							# "undetermined": show nothing for this language
							""
						else:
							language += lang
						cnt += 1
					streams.append((x, "", number, description, language, selected))
			else:
				streams = []
				conflist.append(('',))
				self["key_green"].setBoolean(False)
			if subtitlelist:
				self["key_yellow"].setBoolean(True)
				conflist.append(getConfigListEntry(_("To subtitle selection"), self.settings.menupage))
			else:
				self["key_yellow"].setBoolean(False)
				conflist.append(('',))
			from Components.PluginComponent import plugins
			from Plugins.Plugin import PluginDescriptor
			if hasattr(self.infobar, "runPlugin"):
				class PluginCaller:
					# Bind a plugin to infobar.runPlugin for the blue key.
					def __init__(self, fnc, *args):
						self.fnc = fnc
						self.args = args
					def __call__(self, *args, **kwargs):
						self.fnc(*self.args)
				Plugins = [ (p.name, PluginCaller(self.infobar.runPlugin, p)) for p in plugins.getPlugins(where = PluginDescriptor.WHERE_AUDIOMENU) ]
				if len(Plugins):
					self["key_blue"].setBoolean(True)
					# Only the first audio-menu plugin gets a config entry.
					conflist.append(getConfigListEntry(Plugins[0][0], ConfigNothing()))
					self.plugincallfunc = Plugins[0][1]
				if len(Plugins) > 1:
					print "plugin(s) installed but not displayed in the dialog box:", Plugins[1:]
		elif self.settings.menupage.getValue() == PAGE_SUBTITLES:
			self.setTitle(_("Subtitle selection"))
			conflist.append(('',))
			conflist.append(('',))
			self["key_red"].setBoolean(False)
			self["key_green"].setBoolean(False)
			idx = 0
			# Subtitle tuples: (type, pid/id, subtype, teletext magazine, lang)
			for x in subtitlelist:
				number = str(x[1])
				description = "?"
				language = ""
				selected = ""
				if self.selectedSubtitle and x[:4] == self.selectedSubtitle[:4]:
					selected = "X"
					selectedidx = idx
				try:
					if x[4] != "und":
						if LanguageCodes.has_key(x[4]):
							language = _(LanguageCodes[x[4]][0])
						else:
							language = x[4]
				except:
					language = ""
				if x[0] == 0:
					description = "DVB"
					number = "%x" % (x[1])
				elif x[0] == 1:
					description = "teletext"
					number = "%x%02x" %(x[3] and x[3] or 8, x[2])
				elif x[0] == 2:
					types = ("unknown", "embedded", "SSA file", "ASS file",
							"SRT file", "VOB file", "PGS file")
					try:
						description = types[x[2]]
					except:
						description = _("unknown") + ": %s" % x[2]
					number = str(int(number) + 1)
				streams.append((x, "", number, description, language, selected))
				idx += 1
			conflist.append(getConfigListEntry(_("To audio selection"), self.settings.menupage))
			# Quickmenu is not offered while the DVD player is active.
			if self.infobar.selected_subtitle and self.infobar.selected_subtitle != (0,0,0,0) and not ".DVDPlayer'>" in `self.infobar`:
				self["key_blue"].setBoolean(True)
				conflist.append(getConfigListEntry(_("Subtitle Quickmenu"), ConfigNothing()))
		self["config"].list = conflist
		self["config"].l.setList(conflist)
		self["streams"].list = streams
		self["streams"].setIndex(selectedidx)
def __updatedInfo(self):
self.fillList()
	def getSubtitleList(self):
		"""Return the current service's subtitle tracks.

		Also records the currently enabled subtitle in
		self.selectedSubtitle and makes sure it appears in the returned
		list so the UI can highlight it.
		"""
		service = self.session.nav.getCurrentService()
		subtitle = service and service.subtitle()
		subtitlelist = subtitle and subtitle.getSubtitleList()
		self.selectedSubtitle = None
		if self.subtitlesEnabled():
			self.selectedSubtitle = self.infobar.selected_subtitle
			if self.selectedSubtitle and self.selectedSubtitle[:4] == (0,0,0,0):
				# (0,0,0,0) is the "no subtitle" sentinel used by the infobar.
				self.selectedSubtitle = None
			elif self.selectedSubtitle and not self.selectedSubtitle[:4] in (x[:4] for x in subtitlelist):
				# Active track not reported by the service -- append it so
				# it still shows up in the selection list.
				subtitlelist.append(self.selectedSubtitle)
		return subtitlelist
def subtitlesEnabled(self):
try:
return self.infobar.subtitle_window.shown
except:
return False
def enableSubtitle(self, subtitle):
if self.infobar.selected_subtitle != subtitle:
self.infobar.enableSubtitle(subtitle)
def changeAC3Downmix(self, downmix):
config.av.downmix_ac3.value = downmix.getValue() == True
config.av.downmix_ac3.save()
if SystemInfo["CanDownmixDTS"]:
config.av.downmix_dts.value = config.av.downmix_ac3.value
config.av.downmix_dts.save()
if SystemInfo["CanDownmixAAC"]:
config.av.downmix_aac.value = config.av.downmix_ac3.value
config.av.downmix_aac.save()
def changeMode(self, mode):
if mode is not None and self.audioChannel:
self.audioChannel.selectChannel(int(mode.getValue()))
def changeAudio(self, audio):
track = int(audio)
if isinstance(track, int):
if self.session.nav.getCurrentService().audioTracks().getNumberOfTracks() > track:
self.audioTracks.selectTrack(track)
def keyLeft(self):
if self.focus == FOCUS_CONFIG:
ConfigListScreen.keyLeft(self)
elif self.focus == FOCUS_STREAMS:
self["streams"].setIndex(0)
	def keyRight(self, config = False):
		"""RIGHT key handler.

		Args:
			config: when True, act on the config list even if the stream
				list currently has focus (used by colorkey()).
		"""
		if config or self.focus == FOCUS_CONFIG:
			if self["config"].getCurrentIndex() < 3:
				ConfigListScreen.keyRight(self)
			elif self["config"].getCurrentIndex() == 3:
				# The fourth entry is special: it launches the audio plugin
				# or the subtitle quick menu instead of cycling a value.
				if self.settings.menupage.getValue() == PAGE_AUDIO and hasattr(self, "plugincallfunc"):
					self.plugincallfunc()
				elif self.settings.menupage.getValue() == PAGE_SUBTITLES and self.infobar.selected_subtitle and self.infobar.selected_subtitle != (0,0,0,0):
					self.session.open(QuickSubtitlesConfigMenu, self.infobar)
		if self.focus == FOCUS_STREAMS and self["streams"].count() and config == False:
			# In the stream list, RIGHT jumps to the last entry.
			self["streams"].setIndex(self["streams"].count()-1)
def keyRed(self):
if self["key_red"].getBoolean():
self.colorkey(0)
else:
return 0
def keyGreen(self):
if self["key_green"].getBoolean():
self.colorkey(1)
else:
return 0
def keyYellow(self):
if self["key_yellow"].getBoolean():
self.colorkey(2)
else:
return 0
def keyBlue(self):
if self["key_blue"].getBoolean():
self.colorkey(3)
else:
return 0
def colorkey(self, idx):
self["config"].setCurrentIndex(idx)
self.keyRight(True)
	def keyUp(self):
		"""UP key: move within the config list, or from the stream list back into it."""
		if self.focus == FOCUS_CONFIG:
			self["config"].instance.moveSelection(self["config"].instance.moveUp)
		elif self.focus == FOCUS_STREAMS:
			if self["streams"].getIndex() == 0:
				# Top of the stream list reached: hand focus back to the
				# last entry of the config list.
				self["config"].instance.setSelectionEnable(True)
				self["streams"].style = "notselected"
				self["config"].setCurrentIndex(len(self["config"].getList())-1)
				self.focus = FOCUS_CONFIG
			else:
				self["streams"].selectPrevious()
	def keyDown(self):
		"""DOWN key: move within the config list, or from its end into the stream list."""
		if self.focus == FOCUS_CONFIG:
			if self["config"].getCurrentIndex() < len(self["config"].getList())-1:
				self["config"].instance.moveSelection(self["config"].instance.moveDown)
			else:
				# Past the last config entry: move focus to the stream list.
				self["config"].instance.setSelectionEnable(False)
				self["streams"].style = "default"
				self.focus = FOCUS_STREAMS
		elif self.focus == FOCUS_STREAMS:
			self["streams"].selectNext()
def volumeUp(self):
VolumeControl.instance and VolumeControl.instance.volUp()
def volumeDown(self):
VolumeControl.instance and VolumeControl.instance.volDown()
def volumeMute(self):
VolumeControl.instance and VolumeControl.instance.volMute()
def keyNumberGlobal(self, number):
if number <= len(self["streams"].list):
self["streams"].setIndex(number-1)
self.keyOk()
	def keyOk(self):
		"""OK key: apply the highlighted stream, or activate the focused config entry."""
		if self.focus == FOCUS_STREAMS and self["streams"].list:
			cur = self["streams"].getCurrent()
			if self.settings.menupage.getValue() == PAGE_AUDIO and cur[0] is not None:
				self.changeAudio(cur[0])
				self.__updatedInfo()
			if self.settings.menupage.getValue() == PAGE_SUBTITLES and cur[0] is not None:
				if self.infobar.selected_subtitle and self.infobar.selected_subtitle[:4] == cur[0][:4]:
					# OK on the already active subtitle toggles it off;
					# keep the list position after the refresh.
					self.enableSubtitle(None)
					selectedidx = self["streams"].getIndex()
					self.__updatedInfo()
					self["streams"].setIndex(selectedidx)
				else:
					self.enableSubtitle(cur[0][:5])
					self.__updatedInfo()
				# Choosing a subtitle closes the dialog.
				self.close(0)
		elif self.focus == FOCUS_CONFIG:
			self.keyRight()
	def openAutoLanguageSetup(self):
		"""Open the auto-language setup, asking for the parental PIN first
		when context menus are PIN-protected."""
		if self.protectContextMenu and config.ParentalControl.setuppinactive.value and config.ParentalControl.config_sections.context_menus.value:
			self.session.openWithCallback(self.protectResult, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the correct pin code"), windowTitle=_("Enter pin code"))
		else:
			self.protectResult(True)
def protectResult(self, answer):
if answer:
self.session.open(Setup, "autolanguagesetup")
self.protectContextMenu = False
elif answer is not None:
self.session.openWithCallback(self.close, MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR)
def cancel(self):
self.close(0)
class SubtitleSelection(AudioSelection):
	"""AudioSelection dialog opened directly on the subtitle page."""
	def __init__(self, session, infobar=None):
		AudioSelection.__init__(self, session, infobar, page=PAGE_SUBTITLES)
		# Reuse the AudioSelection skin instead of defining a new one.
		self.skinName = ["AudioSelection"]
class QuickSubtitlesConfigMenu(ConfigListScreen, Screen):
	"""Compact overlay exposing only the settings relevant to the
	currently selected subtitle type (DVB, teletext or pango/file)."""
	skin = """
	<screen position="50,50" size="480,280" title="Subtitle settings" backgroundColor="#7f000000" flags="wfNoBorder">
		<widget name="config" position="5,5" size="470,250" font="Regular;18" zPosition="1" transparent="1" selectionPixmap="PLi-HD/buttons/sel.png" valign="center" />
		<widget name="videofps" position="5,255" size="470,20" backgroundColor="secondBG" transparent="1" zPosition="1" font="Regular;16" valign="center" halign="left" foregroundColor="blue"/>
	</screen>"""

	def __init__(self, session, infobar):
		"""Build the menu for the subtitle currently selected on *infobar*."""
		Screen.__init__(self, session)
		self.skin = QuickSubtitlesConfigMenu.skin
		self.infobar = infobar or self.session.infobar
		# One-shot timer: re-sync subtitles shortly after timing values change.
		self.wait = eTimer()
		self.wait.timeout.get().append(self.resyncSubtitles)
		self["videofps"] = Label("")
		sub = self.infobar.selected_subtitle
		if sub[0] == 0: # dvb
			menu = [
				getConfigMenuItem("config.subtitles.dvb_subtitles_yellow"),
				getConfigMenuItem("config.subtitles.dvb_subtitles_centered"),
				getConfigMenuItem("config.subtitles.dvb_subtitles_backtrans"),
				getConfigMenuItem("config.subtitles.dvb_subtitles_original_position"),
				getConfigMenuItem("config.subtitles.subtitle_position"),
				getConfigMenuItem("config.subtitles.subtitle_bad_timing_delay"),
				getConfigMenuItem("config.subtitles.subtitle_noPTSrecordingdelay"),
			]
		elif sub[0] == 1: # teletext
			menu = [
				getConfigMenuItem("config.subtitles.ttx_subtitle_colors"),
				getConfigMenuItem("config.subtitles.ttx_subtitle_original_position"),
				getConfigMenuItem("config.subtitles.subtitle_fontsize"),
				getConfigMenuItem("config.subtitles.subtitle_position"),
				getConfigMenuItem("config.subtitles.subtitle_rewrap"),
				getConfigMenuItem("config.subtitles.subtitle_borderwidth"),
				getConfigMenuItem("config.subtitles.subtitle_alignment"),
				getConfigMenuItem("config.subtitles.subtitle_bad_timing_delay"),
				getConfigMenuItem("config.subtitles.subtitle_noPTSrecordingdelay"),
			]
		else: # pango
			menu = [
				getConfigMenuItem("config.subtitles.pango_subtitles_delay"),
				getConfigMenuItem("config.subtitles.pango_subtitle_colors"),
				getConfigMenuItem("config.subtitles.pango_subtitle_fontswitch"),
				getConfigMenuItem("config.subtitles.colourise_dialogs"),
				getConfigMenuItem("config.subtitles.subtitle_fontsize"),
				getConfigMenuItem("config.subtitles.subtitle_position"),
				getConfigMenuItem("config.subtitles.subtitle_alignment"),
				getConfigMenuItem("config.subtitles.subtitle_rewrap"),
				getConfigMenuItem("config.subtitles.subtitle_borderwidth"),
				getConfigMenuItem("config.subtitles.pango_subtitles_fps"),
			]
		# BUGFIX: rstrip(".000") strips a *character set*, so "50.000"
		# became "5".  Strip trailing zeros, then a trailing dot, instead.
		fps = self.getFps().rstrip('0').rstrip('.')
		self["videofps"].setText(_("Video: %s fps") % fps)
		ConfigListScreen.__init__(self, menu, self.session, on_change = self.changedEntry)
		self["actions"] = NumberActionMap(["SetupActions"],
		{
			"cancel": self.cancel,
			"ok": self.ok,
		},-2)
		self.onLayoutFinish.append(self.layoutFinished)

	def layoutFinished(self):
		# Without fps info, shrink the window to fit just the config list.
		if not self["videofps"].text:
			self.instance.resize(eSize(self.instance.size().width(), self["config"].l.getItemSize().height()*len(self["config"].getList()) + 10))

	def changedEntry(self):
		# Delay/fps tweaks need a pause/play cycle to take effect; debounce it.
		if self["config"].getCurrent() in [getConfigMenuItem("config.subtitles.pango_subtitles_delay"),getConfigMenuItem("config.subtitles.pango_subtitles_fps")]:
			self.wait.start(500, True)

	def resyncSubtitles(self):
		# A quick pause/play forces the player to re-evaluate subtitle timing.
		self.infobar.setSeekState(self.infobar.SEEK_STATE_PAUSE)
		self.infobar.setSeekState(self.infobar.SEEK_STATE_PLAY)

	def getFps(self):
		"""Return the current video frame rate as a string, or "" if unknown."""
		from enigma import iServiceInformation
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		if not info:
			return ""
		fps = info.getInfo(iServiceInformation.sFrameRate)
		if fps > 0:
			return "%6.3f" % (fps/1000.)
		return ""

	def cancel(self):
		"""Close without saving."""
		self.close()

	def ok(self):
		"""Persist the subtitle settings and close."""
		config.subtitles.save()
		self.close()
|
sodafree/backend | refs/heads/master | build/lib.linux-i686-2.7/django/core/serializers/pyyaml.py | 81 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
from StringIO import StringIO
import decimal
import yaml
from django.db import models
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
class DjangoSafeDumper(yaml.SafeDumper):
    # SafeDumper has no representer for decimal.Decimal; emit it as a
    # plain YAML string so the output stays loadable by any YAML parser.
    def represent_decimal(self, data):
        return self.represent_scalar('tag:yaml.org,2002:str', str(data))
# Register the Decimal representer on the dumper class.
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
class Serializer(PythonSerializer):
    """
    Convert a queryset to YAML.
    """
    internal_use_only = False
    def handle_field(self, obj, field):
        # A nasty special case: base YAML doesn't support serialization of time
        # types (as opposed to dates or datetimes, which it does support). Since
        # we want to use the "safe" serializer for better interoperability, we
        # need to do something with those pesky times. Converting 'em to strings
        # isn't perfect, but it's better than a "!!python/time" type which would
        # halt deserialization under any other language.
        if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
            self._current[field.name] = str(getattr(obj, field.name))
        else:
            super(Serializer, self).handle_field(obj, field)
    def end_serialization(self):
        # DjangoSafeDumper adds safe handling of Decimal values.
        yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
    def getvalue(self):
        # Only meaningful when serializing to an in-memory stream.
        return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of YAML data.

    Yields deserialized objects; any parsing/deserialization failure is
    re-raised as DeserializationError.
    """
    if isinstance(stream_or_string, basestring):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    try:
        for obj in PythonDeserializer(yaml.safe_load(stream), **options):
            yield obj
    except GeneratorExit:
        raise
    except Exception as e:
        # "except ... as" (instead of the Python-2-only comma form) keeps
        # this importable on Python 2.6+ and Python 3.
        # Map to deserializer error
        raise DeserializationError(e)
|
moreati/django | refs/heads/master | tests/check_framework/models.py | 396 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class SimpleModel(models.Model):
    """Minimal model used by the check-framework tests."""
    field = models.IntegerField()
    # Explicitly declared manager (named `manager` rather than the
    # implicit `objects`).
    manager = models.manager.Manager()
|
petry/kanboard | refs/heads/master | apps/teams/tests/__init__.py | 4 | __author__ = 'petry'
|
torufuru/OFPatchPanel | refs/heads/hackathon | ryu/lib/packet/mpls.py | 22 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import socket
from . import packet_base
from . import packet_utils
from . import ipv4
from ryu.ofproto import ether
class mpls(packet_base.PacketBase):
    """MPLS (RFC 3032) label stack entry encoder/decoder class.

    NOTE: When decoding, this implementation assumes that the inner protocol
    is IPv4.

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    ============== ====================
    Attribute      Description
    ============== ====================
    label          Label Value
    exp            Experimental Use
    bsb            Bottom of Stack
    ttl            Time To Live
    ============== ====================
    """

    # One 32-bit big-endian word per label stack entry.
    _PACK_STR = '!I'
    _MIN_LEN = struct.calcsize(_PACK_STR)

    def __init__(self, label=0, exp=0, bsb=1, ttl=255):
        super(mpls, self).__init__()
        self.label = label
        self.exp = exp
        self.bsb = bsb
        self.ttl = ttl

    @classmethod
    def parser(cls, buf):
        # Layout: label(20) | exp(3) | bottom-of-stack(1) | ttl(8).
        (word,) = struct.unpack_from(cls._PACK_STR, buf)
        msg = cls(word >> 12, (word >> 9) & 0x7, (word >> 8) & 0x1,
                  word & 0xff)
        # Bottom of stack -> inner payload assumed IPv4, else another label.
        next_cls = ipv4.ipv4 if msg.bsb else mpls
        return msg, next_cls, buf[msg._MIN_LEN:]

    def serialize(self, payload, prev):
        word = (self.label << 12) | (self.exp << 9) | (self.bsb << 8) | self.ttl
        return struct.pack(self._PACK_STR, word)
|
henniggroup/MPInterfaces | refs/heads/master | mpinterfaces/mat2d/electronic_structure/analysis.py | 1 | from __future__ import print_function, division, unicode_literals
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy as np
from scipy.spatial.distance import euclidean
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Vasprun, Locpot, VolumetricData
from pymatgen.io.vasp.inputs import Incar
from pymatgen.electronic_structure.plotter import BSPlotter, BSPlotterProjected
from pymatgen.electronic_structure.core import Spin
from mpinterfaces.utils import is_converged
__author__ = "Michael Ashton"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Michael Ashton"
__email__ = "joshgabriel92@gmail.com"
__status__ = "Production"
__date__ = "March 3, 2017"
def get_band_edges():
    """
    Calculate the band edge locations relative to the vacuum level
    for a semiconductor. For a metal, returns the fermi level.

    Returns:
        edges (dict): {'up_cbm': , 'up_vbm': , 'dn_cbm': , 'dn_vbm': , 'efermi'}
    """
    # Vacuum level energy from LOCPOT.
    locpot = Locpot.from_file('LOCPOT')
    evac = max(locpot.get_average_along_axis(2))
    vasprun = Vasprun('vasprun.xml')
    bs = vasprun.get_band_structure()
    eigenvals = vasprun.eigenvalues
    efermi = vasprun.efermi - evac
    if bs.is_metal():
        # No band edges for metals; only the (vacuum-referenced) Fermi level.
        edges = {'up_cbm': None, 'up_vbm': None, 'dn_cbm': None, 'dn_vbm': None,
                 'efermi': efermi}
    elif bs.is_spin_polarized:
        # Per spin channel: CBM = lowest eigenvalue with zero occupancy,
        # VBM = highest occupied one (e is (energy, occupancy) -- presumably;
        # verify against pymatgen's eigenvalues layout).
        up_cbm = min(
            [min([e[0] for e in eigenvals[Spin.up][i] if not e[1]])
             for i in range(len(eigenvals[Spin.up]))]) - evac
        up_vbm = max(
            [max([e[0] for e in eigenvals[Spin.up][i] if e[1]])
             for i in range(len(eigenvals[Spin.up]))]) - evac
        dn_cbm = min(
            [min([e[0] for e in eigenvals[Spin.down][i] if not e[1]])
             for i in range(len(eigenvals[Spin.down]))]) - evac
        dn_vbm = max(
            [max([e[0] for e in eigenvals[Spin.down][i] if e[1]])
             for i in range(len(eigenvals[Spin.down]))]) - evac
        edges = {'up_cbm': up_cbm, 'up_vbm': up_vbm, 'dn_cbm': dn_cbm,
                 'dn_vbm': dn_vbm, 'efermi': efermi}
    else:
        # Non-spin-polarized: both channels share the same edges.
        cbm = bs.get_cbm()['energy'] - evac
        vbm = bs.get_vbm()['energy'] - evac
        edges = {'up_cbm': cbm, 'up_vbm': vbm, 'dn_cbm': cbm, 'dn_vbm': vbm,
                 'efermi': efermi}
    return edges
def plot_band_alignments(directories, run_type='PBE', fmt='pdf'):
    """
    Plot CBM's and VBM's of all compounds together, relative to the band
    edges of H2O.

    Args:
        directories (list): list of the directory paths for materials
            to include in the plot.
        run_type (str): 'PBE' or 'HSE', so that the function knows which
            subdirectory to go into (pbe_bands or hse_bands).
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.
    """
    if run_type == 'HSE':
        subdirectory = 'hse_bands'
    else:
        subdirectory = 'pbe_bands'
    band_gaps = {}
    for directory in directories:
        sub_dir = os.path.join(directory, subdirectory)
        if is_converged(sub_dir):
            os.chdir(sub_dir)
            band_structure = Vasprun('vasprun.xml').get_band_structure()
            band_gap = band_structure.get_band_gap()
            # Vacuum level energy from LOCPOT.
            locpot = Locpot.from_file('LOCPOT')
            evac = max(locpot.get_average_along_axis(2))
            if not band_structure.is_metal():
                is_direct = band_gap['direct']
                cbm = band_structure.get_cbm()
                vbm = band_structure.get_vbm()
            else:
                cbm = None
                vbm = None
                is_direct = False
            band_gaps[directory] = {'CBM': cbm, 'VBM': vbm,
                                    'Direct': is_direct,
                                    'Metal': band_structure.is_metal(),
                                    'E_vac': evac}
            # NOTE(review): assumes sub_dir is exactly two levels below the
            # starting cwd -- verify for absolute/nested directory paths.
            os.chdir('../../')
    ax = plt.figure(figsize=(16, 10)).gca()
    x_max = len(band_gaps) * 1.315
    ax.set_xlim(0, x_max)
    # Rectangle representing band edges of water.
    ax.add_patch(plt.Rectangle((0, -5.67), height=1.23, width=len(band_gaps),
                 facecolor='#00cc99', linewidth=0))
    ax.text(len(band_gaps) * 1.01, -4.44, r'$\mathrm{H+/H_2}$', size=20,
            verticalalignment='center')
    ax.text(len(band_gaps) * 1.01, -5.67, r'$\mathrm{O_2/H_2O}$', size=20,
            verticalalignment='center')
    x_ticklabels = []
    y_min = -8
    i = 0
    # Nothing but lies.
    are_directs, are_indirects, are_metals = False, False, False
    for compound in [cpd for cpd in directories if cpd in band_gaps]:
        x_ticklabels.append(compound)
        # Plot all energies relative to their vacuum level.
        evac = band_gaps[compound]['E_vac']
        if band_gaps[compound]['Metal']:
            # Metals are drawn as a full-height grey bar (no real gap).
            cbm = -8
            vbm = -2
        else:
            cbm = band_gaps[compound]['CBM']['energy'] - evac
            vbm = band_gaps[compound]['VBM']['energy'] - evac
        # Add a box around direct gap compounds to distinguish them.
        if band_gaps[compound]['Direct']:
            are_directs = True
            linewidth = 5
        elif not band_gaps[compound]['Metal']:
            are_indirects = True
            linewidth = 0
        # Metals are grey.
        if band_gaps[compound]['Metal']:
            are_metals = True
            linewidth = 0
            color_code = '#404040'
        else:
            color_code = '#002b80'
        # CBM
        ax.add_patch(plt.Rectangle((i, cbm), height=-cbm, width=0.8,
                                   facecolor=color_code, linewidth=linewidth,
                                   edgecolor="#e68a00"))
        # VBM
        ax.add_patch(plt.Rectangle((i, y_min),
                                   height=(vbm - y_min), width=0.8,
                                   facecolor=color_code, linewidth=linewidth,
                                   edgecolor="#e68a00"))
        i += 1
    ax.set_ylim(y_min, 0)
    # Set tick labels
    ax.set_xticks([n + 0.4 for n in range(i)])
    ax.set_xticklabels(x_ticklabels, family='serif', size=20, rotation=60)
    ax.set_yticklabels(ax.get_yticks(), family='serif', size=20)
    # Add a legend
    height = y_min
    if are_directs:
        ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
                                   height=(-y_min*0.1), facecolor='#002b80',
                                   edgecolor='#e68a00', linewidth=5))
        ax.text(i*1.24, height - y_min * 0.05, 'Direct', family='serif',
                color='w', size=20, horizontalalignment='center',
                verticalalignment='center')
        height -= y_min * 0.15
    if are_indirects:
        ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
                                   height=(-y_min*0.1), facecolor='#002b80',
                                   linewidth=0))
        ax.text(i*1.24, height - y_min * 0.05, 'Indirect', family='serif',
                size=20, color='w', horizontalalignment='center',
                verticalalignment='center')
        height -= y_min * 0.15
    if are_metals:
        ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
                                   height=(-y_min*0.1), facecolor='#404040',
                                   linewidth=0))
        ax.text(i*1.24, height - y_min * 0.05, 'Metal', family='serif',
                size=20, color='w', horizontalalignment='center',
                verticalalignment='center')
    # Who needs axes?
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_ylabel('eV', family='serif', size=24)
    if fmt == "None":
        return ax
    else:
        plt.savefig('band_alignments.{}'.format(fmt), transparent=True)
    plt.close()
def plot_local_potential(axis=2, ylim=(-20, 0), fmt='pdf'):
    """
    Plot data from the LOCPOT file along any of the 3 primary axes.
    Useful for determining surface dipole moments and electric
    potentials on the interior of the material.

    Args:
        axis (int): 0 = x, 1 = y, 2 = z
        ylim (tuple): minimum and maximum potentials for the plot's y-axis.
        fmt (str): matplotlib format style. Check the matplotlib docs
            for options.
    """
    ax = plt.figure(figsize=(16, 10)).gca()
    locpot = Locpot.from_file('LOCPOT')
    structure = Structure.from_file('CONTCAR')
    vd = VolumetricData(structure, locpot.data)
    abs_potentials = vd.get_average_along_axis(axis)
    # The vacuum level is taken as the maximum planar-averaged potential.
    vacuum_level = max(abs_potentials)
    vasprun = Vasprun('vasprun.xml')
    bs = vasprun.get_band_structure()
    if not bs.is_metal():
        cbm = bs.get_cbm()['energy'] - vacuum_level
        vbm = bs.get_vbm()['energy'] - vacuum_level
    # Plot everything referenced to the vacuum level.
    potentials = [potential - vacuum_level for potential in abs_potentials]
    axis_length = structure.lattice.lengths[axis]
    positions = np.arange(0, axis_length, axis_length / len(potentials))
    ax.plot(positions, potentials, linewidth=2, color='k')
    ax.set_xlim(0, axis_length)
    ax.set_ylim(ylim[0], ylim[1])
    ax.set_xticklabels(
        [r'$\mathrm{%s}$' % tick for tick in ax.get_xticks()], size=20)
    ax.set_yticklabels(
        [r'$\mathrm{%s}$' % tick for tick in ax.get_yticks()], size=20)
    ax.set_xlabel(r'$\mathrm{\AA}$', size=24)
    ax.set_ylabel(r'$\mathrm{V\/(eV)}$', size=24)
    if not bs.is_metal():
        # Shade conduction (above CBM) and valence (below VBM) regions.
        ax.text(ax.get_xlim()[1], cbm, r'$\mathrm{CBM}$',
                horizontalalignment='right', verticalalignment='bottom',
                size=20)
        ax.text(ax.get_xlim()[1], vbm, r'$\mathrm{VBM}$',
                horizontalalignment='right', verticalalignment='top', size=20)
        ax.fill_between(ax.get_xlim(), cbm, ax.get_ylim()[1],
                        facecolor=plt.cm.jet(0.3), zorder=0, linewidth=0)
        ax.fill_between(ax.get_xlim(), ax.get_ylim()[0], vbm,
                        facecolor=plt.cm.jet(0.7), zorder=0, linewidth=0)
    if fmt == "None":
        return ax
    else:
        plt.savefig('locpot.{}'.format(fmt))
    plt.close()
### This function uses Pymatgen to plot band structures, and doesn't
### handle KPOINTS with IBZKPT at the top very well. It also doesn't
### work with latex in the latest matplotlib version. If those
### things ever get fixed in Pymatgen we could go back to using
### this function.
# def plot_band_structure(ylim=(-5, 5), draw_fermi=False, fmt='pdf'):
# """
# Plot a standard band structure with no projections.
#
# Args:
# ylim (tuple): minimum and maximum potentials for the plot's y-axis.
# draw_fermi (bool): whether or not to draw a dashed line at E_F.
# fmt (str): matplotlib format style. Check the matplotlib docs
# for options.
# """
#
# vasprun = Vasprun('vasprun.xml')
# efermi = vasprun.efermi
# bsp = BSPlotter(vasprun.get_band_structure('KPOINTS', line_mode=True,
# efermi=efermi))
# if fmt == "None":
# return bsp.bs_plot_data()
# else:
# plot = bsp.get_plot(ylim=ylim)
# fig = plot.gcf()
# ax = fig.gca()
# ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
# ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
# if draw_fermi:
# ax.plot([ax.get_xlim()[0], ax.get_xlim()[1]], [0, 0], 'k--')
# plt.savefig('band_structure.{}'.format(fmt), transparent=True)
#
# plt.close()
def plot_band_structure(ylim=(-5, 5), draw_fermi=False, fmt="pdf"):
    """
    Plot a standard band structure with no projections. Requires
    EIGENVAL, OUTCAR and KPOINTS files in the current working directory.

    Args:
        ylim (tuple): minimum and maximum potentials for the plot's y-axis.
        draw_fermi (bool): whether or not to draw a dashed line at E_F.
        fmt (str): matplotlib format style. Check the matplotlib docs
            for options.
    """
    # Read input files with context managers so the handles are closed
    # (the originals leaked open file objects).
    with open("EIGENVAL") as f:
        eigenval_lines = f.readlines()
    with open("KPOINTS") as f:
        kpoints_lines = f.readlines()
    # IBZ k-points used for SCF but not useful for plotting bands.
    ibz_kpoints = [k for k in kpoints_lines[3:] if int(k.split()[3]) != 0]
    # Lines containing high-symmetry k-points (e.g. Gamma)
    vertex_lines = [k for k in kpoints_lines[3:] if len(k.split()) == 5]
    n_bands = int(eigenval_lines[5].split()[2])
    with open("OUTCAR", "r") as outcar:
        for line in outcar:
            if "E-fermi" in line:
                efermi = float(line.split()[2])
                break
    spin_polarized = False
    if len(eigenval_lines[8].split()) == 5:
        spin_polarized = True
    bs_kpoints = []
    vertices = []
    bands = [[[], []] for x in range(n_bands)]
    # Skip the header and the IBZ (zero-weight excluded) blocks.
    i = 7 + len(ibz_kpoints)*(n_bands+2)
    while i < len(eigenval_lines):
        kpt_coords = [float(x) for x in eigenval_lines[i].split()[:3]]
        for kpt in vertex_lines:
            ref_coords = [float(x) for x in kpt.split()[:3]]
            if euclidean(kpt_coords, ref_coords) < 0.0001:
                # Tag this k-point with its high-symmetry label.
                kpt_coords.append(kpt.split()[-1])
                vertices.append(kpt_coords)
                break
        bs_kpoints.append(kpt_coords)
        for j in range(n_bands):
            i += 1
            split_line = eigenval_lines[i].split()
            bands[j][0].append(float(split_line[1]) - efermi)
            if spin_polarized:
                bands[j][1].append(float(split_line[2]) - efermi)
        i += 2
    path_lengths, kpt_distances = [], [0]
    discontinuity = False
    for i in range(1, len(vertices)):
        if discontinuity:
            # Zero-length segment marks a break in the k-path.
            path_lengths.append(0)
        else:
            path_lengths.append(euclidean(vertices[i][:3],vertices[i-1][:3]))
        if i < len(vertices)-1 and vertices[i][3] != vertices[i-1][3] and\
                vertices[i][3] != vertices[i+1][3] and not discontinuity:
            discontinuity = True
        else:
            discontinuity = False
    # (Removed unused locals `n_kpt_divs` and `j` from the original.)
    x = 0
    for i in range(1, len(bs_kpoints)):
        if len(bs_kpoints[i]) == 4 and len(bs_kpoints[i-1]) == 4 and \
                bs_kpoints[i][3] != bs_kpoints[i-1][3]:
            # Discontinuity between two labelled points: no distance added.
            x += 0
        else:
            x += euclidean(bs_kpoints[i][:3], bs_kpoints[i-1][:3])
        kpt_distances.append(x)
    ax = plt.figure(figsize=(11, 8.5)).gca()
    font = FontProperties()
    font.set_size(24)
    font.set_family("serif")
    large_font = font.copy()
    large_font.set_size(32)
    for b in bands:
        ax.plot(kpt_distances, b[0], 'b-')
        if spin_polarized:
            ax.plot(kpt_distances, b[1], 'r--')
    if draw_fermi:
        ax.plot([min(kpt_distances), max(kpt_distances)], [0, 0], 'k-')
    ax.set_xlim(min(kpt_distances), max(kpt_distances))
    ax.set_xticks([])
    d = 0
    ax.text(d, ylim[0]*1.05, r"$\mathrm{%s}$" % vertices[0][-1],
            fontproperties=font, verticalalignment="top",
            horizontalalignment="center")
    for i in range(len(path_lengths)):
        d += path_lengths[i]
        if i < len(path_lengths)-1 and path_lengths[i+1] == 0 and\
                vertices[i+1][-1] != vertices[i+2][-1]:
            # Joint label (e.g. "X|K") at a path discontinuity.
            label = "{}|{}".format(vertices[i+1][-1], vertices[i+2][-1])
        else:
            label = vertices[i+1][-1]
        if path_lengths[i] != 0:
            ax.text(d, ylim[0]*1.05, r"$\mathrm{%s}$" % label,
                    fontproperties=font, verticalalignment="top",
                    horizontalalignment="center")
            ax.plot([d, d], [ylim[0], ylim[1]], 'k--')
    ax.set_ylim(ylim)
    ax.set_ylabel(r"$\mathrm{E - E_F (eV)}$", fontproperties=large_font)
    ax.set_yticklabels([int(t) for t in ax.get_yticks()], fontproperties=font)
    plt.savefig("band_structure.{}".format(fmt))
def plot_color_projected_bands(ylim=(-5, 5), fmt='pdf'):
    """
    Plot a single band structure where the color of the band indicates
    the elemental character of the eigenvalue.

    Args:
        ylim (tuple): minimum and maximum energies for the plot's
            y-axis.
        fmt (str): matplotlib format style; the string "None" returns
            the axes object instead of saving a figure.
    """
    run = Vasprun('vasprun.xml', parse_projected_eigen=True)
    plotter = BSPlotterProjected(run.get_band_structure('KPOINTS', line_mode=True))
    ax = plotter.get_elt_projected_plots_color().gcf().gca()
    ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
    ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
    ax.set_ylim(ylim)
    if fmt == "None":
        return ax
    plt.savefig('color_projected_bands.{}'.format(fmt))
    plt.close()
def plot_elt_projected_bands(ylim=(-5, 5), fmt='pdf'):
    """
    Plot separate band structures for each element where the size of the
    markers indicates the elemental character of the eigenvalue.

    Args:
        ylim (tuple): minimum and maximum energies for the plot's
            y-axis.
        fmt (str): matplotlib format style; the string "None" returns
            the axes object instead of saving a figure.
    """
    run = Vasprun('vasprun.xml', parse_projected_eigen=True)
    plotter = BSPlotterProjected(run.get_band_structure('KPOINTS', line_mode=True))
    ax = plotter.get_elt_projected_plots(ylim=ylim).gcf().gca()
    ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
    ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
    if fmt == "None":
        return ax
    plt.savefig('elt_projected_bands.{}'.format(fmt))
    plt.close()
def plot_orb_projected_bands(orbitals, fmt='pdf', ylim=(-5, 5)):
    """
    Plot a separate band structure for each orbital of each element in
    orbitals.

    Args:
        orbitals (dict): dictionary of the form
            {element: [orbitals]},
            e.g. {'Mo': ['s', 'p', 'd'], 'S': ['p']}
        ylim (tuple): minimum and maximum energies for the plot's
            y-axis.
        fmt (str): matplotlib format style; the string "None" returns
            the axes object instead of saving a figure.
    """
    run = Vasprun('vasprun.xml', parse_projected_eigen=True)
    plotter = BSPlotterProjected(run.get_band_structure('KPOINTS', line_mode=True))
    ax = plotter.get_projected_plots_dots(orbitals, ylim=ylim).gcf().gca()
    ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
    ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
    if fmt == "None":
        return ax
    plt.savefig('orb_projected_bands.{}'.format(fmt))
    plt.close()
def get_effective_mass():
    """
    This function is in a beta stage, and its results are not
    guaranteed to be useful.

    Finds effective masses from a band structure, using parabolic
    fitting to determine the band curvature at the CBM
    for electrons and at the VBM for holes. This curvature enters
    the equation m* = (hbar)**2 / (d^2E/dk^2).

    To consider anisotropy, the k-space directions to the left and right
    of the CBM/VBM in the band diagram are returned separately.

    *NOTE* Only works for semiconductors and linemode calculations (non-
    spin polarized).
    >30 k-points per string recommended to obtain
    reliable curvatures.

    *NOTE* The parabolic fit can be quite sensitive to the number of
    k-points fit to, so it might be worthwhile adjusting N_KPTS
    to obtain some sense of the error bar.

    TODO: Warn user if CBM/VBM is at the edge of the diagram, and which
    direction (either left or right) was not actually fit to.
    Until fixed, this (most likely) explains any negative masses
    returned.

    Returns:
        Dictionary of the form
        {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},
         'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}
        where 'left' and 'right' indicate the reciprocal
        directions to the left and right of the extremum in the
        band structure.
    """
    H_BAR = 6.582119514e-16  # eV*s
    M_0 = 9.10938356e-31  # kg
    N_KPTS = 6  # Number of k-points included in the parabola.
    spin_up = Spin(1)
    band_structure = Vasprun('vasprun.xml').get_band_structure()
    # Locations of CBM and VBM in band_structure.bands
    cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]
    cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]
    vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]
    vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]
    k = {'electron': {'left': [], 'right': []},
         'hole': {'left': [], 'right': []}}
    E = {'electron': {'left': [], 'right': []},
         'hole': {'left': [], 'right': []}}
    # NOTE(review): _ccoords is a private pymatgen attribute (cartesian
    # coordinates) -- may break across pymatgen versions.
    e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords
    h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords
    # Collect |k - k_extremum| distances and energies on the LEFT side
    # of each extremum (range includes the extremum itself at n = 0).
    for n in range(-N_KPTS, 1):
        e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords
        h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords
        k['electron']['left'].append(
            ((e_coords[0] - e_ref_coords[0])**2 +
             (e_coords[1] - e_ref_coords[1])**2 +
             (e_coords[2] - e_ref_coords[2])**2)**0.5
        )
        k['hole']['left'].append(
            ((h_coords[0] - h_ref_coords[0])**2 +
             (h_coords[1] - h_ref_coords[1])**2 +
             (h_coords[2] - h_ref_coords[2])**2)**0.5
        )
        e_energy = band_structure.bands[
            spin_up][cbm_band_index][cbm_kpoint_index + n]
        h_energy = band_structure.bands[
            spin_up][vbm_band_index][vbm_kpoint_index + n]
        E['electron']['left'].append(e_energy)
        E['hole']['left'].append(h_energy)
    # Same collection on the RIGHT side of each extremum.
    for n in range(1, 1 + N_KPTS):
        e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords
        h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords
        k['electron']['right'].append(
            ((e_coords[0] - e_ref_coords[0])**2 +
             (e_coords[1] - e_ref_coords[1])**2 +
             (e_coords[2] - e_ref_coords[2])**2)**0.5
        )
        k['hole']['right'].append(
            ((h_coords[0] - h_ref_coords[0])**2 +
             (h_coords[1] - h_ref_coords[1])**2 +
             (h_coords[2] - h_ref_coords[2])**2)**0.5
        )
        e_energy = band_structure.bands[
            spin_up][cbm_band_index][cbm_kpoint_index + n]
        h_energy = band_structure.bands[
            spin_up][vbm_band_index][vbm_kpoint_index + n]
        E['electron']['right'].append(e_energy)
        E['hole']['right'].append(h_energy)
    # 2nd order fits
    e_l_fit = np.poly1d(
        np.polyfit(k['electron']['left'], E['electron']['left'], 2))
    e_r_fit = np.poly1d(
        np.polyfit(k['electron']['right'], E['electron']['right'], 2))
    h_l_fit = np.poly1d(
        np.polyfit(k['hole']['left'], E['hole']['left'], 2))
    h_r_fit = np.poly1d(
        np.polyfit(k['hole']['right'], E['hole']['right'], 2))
    # Curvatures
    e_l_curvature = e_l_fit.deriv().deriv()[0]
    e_r_curvature = e_r_fit.deriv().deriv()[0]
    h_l_curvature = h_l_fit.deriv().deriv()[0]
    h_r_curvature = h_r_fit.deriv().deriv()[0]
    # Unit conversion
    # NOTE(review): the factor of 10 is an empirical unit-conversion
    # constant inherited from the original code -- verify its derivation.
    e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0
    e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0
    h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0
    h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0
    return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},
            'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}
def plot_density_of_states(xlim=(-10, 5), ylim=(-1.5, 1.5), fmt='pdf'):
    """
    Plots the density of states from the DOSCAR in the cwd. Plots
    spin up in red, down in green, and the sum in black. Efermi = 0.

    Args:
        xlim (tuple): minimum and maximum energies for the plot's
            x-axis.
        ylim (tuple): minimum and maximum for the plot's
            y-axis.
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options. If None, the axes object is returned
            instead of saving a figure.

    Returns:
        matplotlib axes object when ``fmt`` is None, otherwise None.
    """
    efermi = Vasprun('vasprun.xml').efermi
    # Read the DOS table; use a context manager so the file handle is not
    # leaked (the original `open('DOSCAR').readlines()` never closed it).
    with open('DOSCAR') as f:
        dos_lines = f.readlines()

    x, up, down = [], [], []
    nedos = Incar.from_file('INCAR').as_dict()['NEDOS'] - 1
    # DOSCAR data rows start after the 6-line header: energy, up-DOS, down-DOS.
    for line in dos_lines[6:6 + nedos]:
        split_line = line.split()
        x.append(float(split_line[0]) - efermi)  # energies shifted so E_F = 0
        up.append(float(split_line[1]))
        down.append(-float(split_line[2]))  # plot spin-down below the axis

    x, up, down = np.array(x), np.array(up), np.array(down)
    total = up + down  # renamed from `sum`, which shadowed the builtin

    ax = plt.figure().gca()
    ax.set_xlim(xlim[0], xlim[1])
    ax.set_ylim(ylim[0], ylim[1])
    ax.set_xlabel(r'$\mathrm{E\/(eV)}$')
    # BUG FIX: the label was missing the closing '}', which makes matplotlib's
    # mathtext parser raise an error when the figure is drawn.
    ax.set_ylabel(r'$\mathrm{Density\/of\/States}$')
    # NOTE(review): get_xticklabels() yields Text objects, so '%s' formats
    # their reprs, not numbers. Preserved as-is from the original; verify
    # the rendered tick labels are what is intended.
    ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
    ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
    ax.plot(x, up, color='red')
    ax.plot(x, down, color='green')
    ax.plot(x, total, color='black')

    if fmt is not None:
        plt.savefig('density_of_states.{}'.format(fmt))
        plt.close()
    else:
        # Caller wants the live axes; leave the figure open.
        return ax
def get_fermi_velocities():
    """
    Calculate the fermi velocity of each band that crosses the fermi
    level, according to v_F = dE/(h_bar*dk).

    Returns:
        fermi_velocities (list). The absolute values of the
            adjusted slopes of each band, in Angstroms/s.
    """
    vr = Vasprun('vasprun.xml')
    bs = vr.get_band_structure()
    kpoints = bs.kpoints
    efermi = bs.efermi
    h_bar = 6.582e-16  # eV*s

    # Keep only the bands that actually straddle the fermi level.
    crossing_bands = [band for spin in bs.bands for band in bs.bands[spin]
                      if max(band) > efermi > min(band)]

    fermi_velocities = []
    for band in crossing_bands:
        for i in range(len(band) - 1):
            e0, e1 = band[i], band[i + 1]
            # A crossing happens between k-points i and i+1 when the fermi
            # level sits strictly between the two band energies.
            if not (e0 < efermi < e1 or e0 > efermi > e1):
                continue
            # In-plane (x, y) distance between the neighboring k-points.
            dx = kpoints[i + 1].cart_coords[0] - kpoints[i].cart_coords[0]
            dy = kpoints[i + 1].cart_coords[1] - kpoints[i].cart_coords[1]
            dk = np.sqrt(dx ** 2 + dy ** 2)
            fermi_velocities.append(abs((e1 - e0) / (h_bar * dk)))

    return fermi_velocities  # Values are in Angst./s
def find_dirac_nodes():
    """
    Look for band crossings near (within 0.1 eV of) the Fermi level.

    Returns:
        boolean. Whether or not a band crossing occurs at or near
            the fermi level.
    """
    vasprun = Vasprun('vasprun.xml')
    dirac = False
    # Only bother scanning when the gap is (near) zero or negative.
    if vasprun.get_band_structure().get_band_gap()['energy'] < 0.1:
        efermi = vasprun.efermi
        bsp = BSPlotter(vasprun.get_band_structure('KPOINTS', line_mode=True,
                                                   efermi=efermi))
        bands = []
        data = bsp.bs_plot_data(zero_to_efermi=True)
        for d in range(len(data['distances'])):
            for i in range(bsp._nb_bands):
                # BUG FIX: a stray trailing comma previously made `x` a
                # 1-tuple wrapping the distance list, so the scan below
                # (`range(len(bands[i][0]))`) only ever checked the first
                # k-point of each branch instead of all of them.
                x = data['distances'][d]
                y = [data['energy'][d][str(Spin.up)][i][j]
                     for j in range(len(data['distances'][d]))]
                band = [x, y]
                bands.append(band)
        considered = []
        for i in range(len(bands)):
            for j in range(len(bands)):
                if i != j and (j, i) not in considered:
                    considered.append((j, i))
                    for k in range(len(bands[i][0])):
                        # A "node": band i within 0.1 eV of E_F (energies are
                        # zeroed to E_F) and within 0.1 eV of band j at the
                        # same k-point.
                        if ((-0.1 < bands[i][1][k] < 0.1) and
                                (-0.1 < bands[i][1][k] - bands[j][1][k] < 0.1)):
                            dirac = True
    return dirac
def plot_spin_texture(inner_index, outer_index, center=(0, 0), fmt='pdf'):
    """
    Create six plots- one for the spin texture in x, y, and z in
    each of two bands: an inner band and an outer band. For
    Rashba spin-splitting, these two bands should be the two that
    have split.

    Args:
        inner_index (int): indices of the two spin-split bands.
        outer_index (int): indices of the two spin-split bands.
        center (tuple): coordinates of the center of the splitting
            (where the bands cross). Defaults to Gamma.
        fmt: matplotlib format style. Check the matplotlib
            docs for options.
    """
    procar_lines = open("PROCAR").readlines()

    # PROCAR line 2 holds the counts: k-points (field 3), bands (field 7),
    # ions (field 11).
    data = procar_lines[1].split()
    n_kpts = int(data[3])
    n_bands = int(data[7])
    n_ions = int(data[11])
    # These numbers, along with almost everything else in this
    # function, are magical. Don't touch them.
    # NOTE(review): band_step/k_step are the line counts of one band block and
    # one k-point block in this PROCAR layout -- presumably a noncollinear run
    # with four projection tables per band; confirm against the actual VASP
    # output before reusing with different settings.
    band_step = (n_ions + 1) * 4 + 4
    k_step = n_bands * band_step + 3
    kpoints = []
    spin_textures = {'inner': {'x': [], 'y': [], 'z': []},
                     'outer': {'x': [], 'y': [], 'z': []}}
    # Pre-fill one accumulator per k-point so the `+=` below can index by j.
    for n in range(n_kpts):
        for var in ['x', 'y', 'z']:
            spin_textures['inner'][var].append(0)
            spin_textures['outer'][var].append(0)
    # i walks the file one k-point block at a time; j is the k-point counter.
    i = 3
    j = 0
    while i < len(procar_lines):
        # Fixed-width columns 18:29 / 29:40 hold the k-point's x / y
        # coordinates; shift by `center` so the splitting point is the origin.
        kpoints.append([float(procar_lines[i][18:29]) - center[0],
                        float(procar_lines[i][29:40]) - center[1]])
        # Offsets (4+(n_ions+1)*2/3/4) select the x/y/z projection "tot" rows
        # inside the chosen band's block; the last field is the total.
        spin_textures['inner']['x'][j] += float(
            procar_lines[i+(4+(n_ions+1)*2)+inner_index*band_step].split()[-1])
        spin_textures['inner']['y'][j] += float(
            procar_lines[i+(4+(n_ions+1)*3)+inner_index*band_step].split()[-1])
        spin_textures['inner']['z'][j] += float(
            procar_lines[i+(4+(n_ions+1)*4)+inner_index*band_step].split()[-1])
        spin_textures['outer']['x'][j] += float(
            procar_lines[i+(4+(n_ions+1)*2)+outer_index*band_step].split()[-1])
        spin_textures['outer']['y'][j] += float(
            procar_lines[i+(4+(n_ions+1)*3)+outer_index*band_step].split()[-1])
        spin_textures['outer']['z'][j] += float(
            procar_lines[i+(4+(n_ions+1)*4)+outer_index*band_step].split()[-1])
        i += k_step
        j += 1
    # One polar scatter-style plot per (band, spin component) pair.
    for branch in spin_textures:
        for vector in spin_textures[branch]:
            print('plotting {}_{}.{}'.format(branch, vector, fmt))
            ax = plt.subplot(111, projection='polar')
            raw = [spin_textures[branch][vector][k] for k in range(len(kpoints))]
            # minimum/maximum normalize the values into [0, 1] for the colormap.
            minimum = min(raw)
            maximum = max(raw) - minimum
            r_max = max([np.sqrt(kpt[0]**2 + kpt[1]**2) for kpt in kpoints])
            for l in range(len(kpoints)):
                # Convert (kx, ky) to polar angle, handling the kx == 0 axis
                # and the left half-plane (arctan alone only covers -90..90).
                if kpoints[l][0] == 0 and kpoints[l][1] > 0:
                    theta = np.pi / 2.0
                elif kpoints[l][0] == 0:
                    theta = 3.0 * np.pi / 2.0
                elif kpoints[l][0] < 0:
                    theta = np.pi + np.arctan(kpoints[l][1] / kpoints[l][0])
                else:
                    theta = np.arctan(kpoints[l][1] / kpoints[l][0])
                r = np.sqrt(kpoints[l][0]**2 + kpoints[l][1]**2)
                # Widen markers near the origin so they stay visually similar
                # in polar coordinates; suppress the width exactly at r == 0.
                if r == 0:
                    w = 0
                else:
                    w = r_max*0.07/r
                ax.add_patch(
                    plt.Rectangle(
                        (theta, r), width=w, height=r_max*0.07,
                        color=plt.cm.rainbow(
                            (spin_textures[branch][vector][l]-minimum)/maximum
                        )
                    )
                )
            # Black dot marking the center of the splitting.
            ax.plot(0, 0, linewidth=0, marker='o', color='k', markersize=18)
            ax.set_rmax(r_max)
            plt.axis('off')
            plt.savefig('{}_{}.{}'.format(branch, vector, fmt))
            plt.close()
|
armando-migliaccio/tempest | refs/heads/master | tempest/services/compute/xml/fixed_ips_client.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
class FixedIPsClientXML(RestClientXML):
    """XML REST client for the os-fixed-ips compute extension."""

    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(FixedIPsClientXML, self).__init__(config, username, password,
                                                auth_url, tenant_name)
        self.service = self.config.compute.catalog_type

    def get_fixed_ip_details(self, fixed_ip):
        """Return the parsed details for a single fixed IP address."""
        resp, body = self.get("os-fixed-ips/%s" % fixed_ip, self.headers)
        return resp, self._parse_resp(body)

    def reserve_fixed_ip(self, ip, body):
        """This reserves and unreserves fixed ips."""
        # Any action key/value pair is accepted on purpose so tests can
        # exercise the bad-request path with invalid actions.
        action, value = body.popitem()
        action_elem = Element(action)
        action_elem.append(Text(value))
        resp, body = self.post("os-fixed-ips/%s/action" % ip,
                               str(Document(action_elem)), self.headers)
        return resp, body
|
kartikdhar/djangotest | refs/heads/master | virt1/lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py | 13 | import re
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')


def get_field_size(name):
    """Extract the declared length from a type name like "varchar(11)".

    Returns the integer size, or None when the type carries no size.
    """
    match = field_size_re.match(name)
    return None if match is None else int(match.group(1))
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
    """Dict-like mapping from SQLite column types to Django field names.

    SQLite stores whatever type string it was given (e.g. "varchar(30)"),
    so a plain dictionary lookup is not enough: sized (var)char types are
    matched by pattern and mapped to CharField with a max_length.
    """
    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        normalized = key.lower()
        if normalized in self.base_data_types_reverse:
            return self.base_data_types_reverse[normalized]
        # Not a plain type name -- try to parse a sized (var)char type.
        size = get_field_size(normalized)
        if size is not None:
            return ('CharField', {'max_length': size})
        raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """SQLite implementation of database introspection.

    SQLite has no real information schema; several methods below recover
    metadata either via PRAGMA statements or by re-parsing the raw CREATE
    TABLE statement stored in sqlite_master.
    """
    data_types_reverse = FlexibleFieldLookupDict()

    def get_table_list(self, cursor):
        """
        Returns a list of table and view names in the current database.
        """
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name""")
        # row[1][0]: first letter of the type -- 't' for tables, 'v' for views.
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        return [FieldInfo(info['name'], info['type'], None, info['size'], None, None,
                          info['null_ok']) for info in self._table_info(cursor, table_name)]

    def column_name_converter(self, name):
        """
        SQLite will in some cases, e.g. when returning columns from views and
        subselects, return column names in 'alias."column"' format instead of
        simply 'column'.

        Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
        """
        # TODO: remove when SQLite < 3.7.15 is sufficiently old.
        # 3.7.13 ships in Debian stable as of 2014-03-21.
        if self.connection.Database.sqlite_version_info < (3, 7, 15):
            return name.split('.')[-1].strip('"')
        else:
            return name

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: (other_field_name, other_table)}
        representing all foreign-key relationships in the given table.
        """
        # Dictionary of relations to return
        relations = {}

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        try:
            results = cursor.fetchone()[0].strip()
        except TypeError:
            # It might be a view, then no results will be returned
            return relations
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search('references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]

            if field_desc.startswith("FOREIGN KEY"):
                # Find name of the target FK field
                m = re.match('FOREIGN KEY\(([^\)]*)\).*', field_desc, re.I)
                field_name = m.groups()[0].strip('"')
            else:
                field_name = field_desc.split()[0].strip('"')

            # Fetch the referenced table's schema and locate the referenced
            # column by name, to confirm the relation before recording it.
            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]

            for other_desc in other_table_results.split(','):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue

                other_name = other_desc.split(' ', 1)[0].strip('"')
                if other_name == column:
                    relations[field_name] = (other_name, table)
                    break

        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue

            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))

        return key_columns

    def get_indexes(self, cursor, table_name):
        """Return {column_name: {'primary_key': bool, 'unique': bool}} for
        single-column indexes on the given table."""
        indexes = {}
        # Primary-key columns first, from PRAGMA table_info.
        for info in self._table_info(cursor, table_name):
            if info['pk'] != 0:
                indexes[info['name']] = {'primary_key': True,
                                         'unique': False}
        cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            info = cursor.fetchall()
            # Skip indexes across multiple fields
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
                             'unique': unique}
        return indexes

    def get_primary_key_column(self, cursor, table_name):
        """
        Get the column name of the primary key for the given table.
        """
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        results = row[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
            if m:
                return m.groups()[0]
        return None

    def _table_info(self, cursor, name):
        """Return one dict per column (name/type/size/null_ok/pk) taken from
        PRAGMA table_info."""
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
        # cid, name, type, notnull, dflt_value, pk
        return [{'name': field[1],
                 'type': field[2],
                 'size': get_field_size(field[2]),
                 'null_ok': not field[3],
                 'pk': field[5]  # undocumented
                 } for field in cursor.fetchall()]

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Get the index info
        cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
        for number, index, unique in cursor.fetchall():
            # Get the index info for that index
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": False,
                        "check": False,
                        "index": True,
                    }
                constraints[index]['columns'].append(column)
        # Get the PK
        pk_column = self.get_primary_key_column(cursor, table_name)
        if pk_column:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": [pk_column],
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": False,
                "check": False,
                "index": False,
            }
        return constraints
|
40123142/2015cd_midterm2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py | 858 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and checks that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
    """Trigger a RuntimeWarning from one fixed code location.

    All calls warn from the same line, so the default warning filter will
    show it once while the 'always' filter shows it per call (see
    TestWarnings.test_function).
    """
    warnings.warn(message='rw', category=RuntimeWarning)
class TestWarnings(unittest.TestCase):
    """Test case that deliberately raises warnings of several kinds.

    NOTE: the deprecated unittest aliases (assertEquals, failUnless) are
    used on purpose -- this module exists to *generate* warnings for
    Test_TextTestRunner.test_warnings. Do not modernize them.
    """
    # unittest warnings will be printed at most once per type (max one message
    # for the fail* methods, and one for the assert* methods)
    def test_assert(self):
        self.assertEquals(2+2, 4)
        self.assertEquals(2*2, 4)
        self.assertEquals(2**2, 4)

    def test_fail(self):
        self.failUnless(1)
        self.failUnless(True)

    def test_other_unittest(self):
        self.assertAlmostEqual(2+2, 4)
        self.assertNotAlmostEqual(4+4, 2)

    # these warnings are normally silenced, but they are printed in unittest
    def test_deprecation(self):
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)

    def test_import(self):
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)

    # user warnings should always be printed
    def test_warning(self):
        warnings.warn('uw')
        warnings.warn('uw')
        warnings.warn('uw')

    # these warnings come from the same place; they will be printed
    # only once by default or three times if the 'always' filter is used
    def test_function(self):
        warnfun()
        warnfun()
        warnfun()
if __name__ == '__main__':
    # Record (instead of printing) every warning raised while the tests run.
    with warnings.catch_warnings(record=True) as ws:
        # if an arg is provided pass it to unittest.main as 'warnings'
        # (sys.argv.pop() also removes it so unittest does not mistake it
        # for a test name)
        if len(sys.argv) == 2:
            unittest.main(exit=False, warnings=sys.argv.pop())
        else:
            unittest.main(exit=False)
    # print all the warning messages collected
    for w in ws:
        print(w.message)
|
mark-adams/python-social-auth | refs/heads/master | social/tests/test_utils.py | 73 | import sys
import unittest2 as unittest
from mock import Mock
from social.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, slugify, build_absolute_uri, \
partial_pipeline_data
# True on Python 3; used below to decide whether literal test inputs need
# an explicit .decode('utf-8') (Python 2 bytes -> unicode).
PY3 = sys.version_info[0] == 3
class SanitizeRedirectTest(unittest.TestCase):
    """sanitize_redirect() must return None for anything that is not a
    safe redirect target for the given host."""

    def test_none_redirect(self):
        self.assertIsNone(sanitize_redirect('myapp.com', None))

    def test_empty_redirect(self):
        self.assertIsNone(sanitize_redirect('myapp.com', ''))

    def test_dict_redirect(self):
        self.assertIsNone(sanitize_redirect('myapp.com', {}))

    def test_invalid_redirect(self):
        self.assertIsNone(sanitize_redirect('myapp.com', {'foo': 'bar'}))

    def test_wrong_path_redirect(self):
        self.assertIsNone(
            sanitize_redirect('myapp.com', 'http://notmyapp.com/path/'))

    def test_valid_absolute_redirect(self):
        redirect = sanitize_redirect('myapp.com', 'http://myapp.com/path/')
        self.assertEqual(redirect, 'http://myapp.com/path/')

    def test_valid_relative_redirect(self):
        self.assertEqual(sanitize_redirect('myapp.com', '/path/'), '/path/')
class UserIsAuthenticatedTest(unittest.TestCase):
    """user_is_authenticated() should honor both attribute and callable
    forms of is_authenticated, and reject None."""

    def test_user_is_none(self):
        self.assertEqual(user_is_authenticated(None), False)

    def test_user_is_not_none(self):
        self.assertEqual(user_is_authenticated(object()), True)

    def test_user_has_is_authenticated(self):
        class FakeUser(object):
            is_authenticated = True

        self.assertEqual(user_is_authenticated(FakeUser()), True)

    def test_user_has_is_authenticated_callable(self):
        class FakeUser(object):
            def is_authenticated(self):
                return True

        self.assertEqual(user_is_authenticated(FakeUser()), True)
class UserIsActiveTest(unittest.TestCase):
    """user_is_active() should honor both attribute and callable forms of
    is_active, and reject None."""

    def test_user_is_none(self):
        self.assertEqual(user_is_active(None), False)

    def test_user_is_not_none(self):
        self.assertEqual(user_is_active(object()), True)

    def test_user_has_is_active(self):
        class FakeUser(object):
            is_active = True

        self.assertEqual(user_is_active(FakeUser()), True)

    def test_user_has_is_active_callable(self):
        class FakeUser(object):
            def is_active(self):
                return True

        self.assertEqual(user_is_active(FakeUser()), True)
class SlugifyTest(unittest.TestCase):
    """slugify() should lowercase and dash-separate its input."""

    def test_slugify_formats(self):
        cases = (('FooBar', 'foobar'),
                 ('Foo Bar', 'foo-bar'),
                 ('Foo (Bar)', 'foo-bar'))
        for raw, expected in cases:
            # On Python 2 slugify expects unicode, hence the decode.
            value = raw if PY3 else raw.decode('utf-8')
            self.assertEqual(slugify(value), expected)
class BuildAbsoluteURITest(unittest.TestCase):
    """build_absolute_uri() should join host and path, passing absolute
    URLs through untouched."""

    def setUp(self):
        self.host = 'http://foobar.com'

    def tearDown(self):
        self.host = None

    def test_path_none(self):
        self.assertEqual(build_absolute_uri(self.host), self.host)

    def test_path_empty(self):
        self.assertEqual(build_absolute_uri(self.host, ''), self.host)

    def test_path_http(self):
        uri = build_absolute_uri(self.host, 'http://barfoo.com')
        self.assertEqual(uri, 'http://barfoo.com')

    def test_path_https(self):
        uri = build_absolute_uri(self.host, 'https://barfoo.com')
        self.assertEqual(uri, 'https://barfoo.com')

    def test_host_ends_with_slash_and_path_starts_with_slash(self):
        uri = build_absolute_uri(self.host + '/', '/foo/bar')
        self.assertEqual(uri, 'http://foobar.com/foo/bar')

    def test_absolute_uri(self):
        self.assertEqual(build_absolute_uri(self.host, '/foo/bar'),
                         'http://foobar.com/foo/bar')
class PartialPipelineData(unittest.TestCase):
    """partial_pipeline_data() should merge caller kwargs with the kwargs
    restored from the session, and refresh the stored user."""

    def test_kwargs_included_in_result(self):
        backend = self._backend()
        _, xkwargs = partial_pipeline_data(backend, None, **{'foo': 'bar'})
        self.assertTrue('foo' in xkwargs)
        self.assertEqual(xkwargs['foo'], 'bar')

    def test_update_user(self):
        user = object()
        backend = self._backend(session_kwargs={'user': None})
        _, xkwargs = partial_pipeline_data(backend, user)
        self.assertTrue('user' in xkwargs)
        self.assertEqual(xkwargs['user'], user)

    def _backend(self, session_kwargs=None):
        # Build a mock backend whose strategy restores the given kwargs
        # from the (fake) session.
        strategy = Mock()
        strategy.request = None
        strategy.session_get.return_value = object()
        strategy.partial_from_session.return_value = (
            0, 'mock-backend', [], session_kwargs or {})

        backend = Mock()
        backend.name = 'mock-backend'
        backend.strategy = strategy
        return backend
|
f0rki/cb-multios | refs/heads/master | original-challenges/HighFrequencyTradingAlgo/poller/for-testing/machine.py | 1 | #!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions
import string
import random
import itertools
import math
from struct import *
class TemplateGenerator(Actions):
    """Poller for the HighFrequencyTradingAlgo CGC challenge.

    Streams stock-price records to the challenge binary as packed
    little-endian shorts and expects the binary to report a doubled
    balance. The record stream is crafted so price relatives land in
    specific standard-deviation bands of a running Welford-style
    mean/variance estimate, mirroring the binary's own statistics.
    """
    def start(self):
        # First record: (0, starting balance).
        starting_balance = 1000
        record_str = pack('HH', 0, starting_balance)
        self.write(record_str)
        # Running statistics of the price relative (record[0]/record[1]);
        # 'Q' is the Welford sum of squared deviations.
        dataset = {'sampleSize' : 0.0, 'mean' : 0.0, 'Q' : 0.0, 'variance' : 0.0, 'stdDev' : 0.0}
        # Phase 1: 500 random records, rejection-sampled into the range a
        # uint16 can carry; full mean/variance update per record.
        for i in range(0, 500):
            while True:
                record = [random.normalvariate(50.0, 10), random.normalvariate(50.0, 10)]
                if (record[0] < 65000 and record[0] >= 1 and record[1] < 65000 and record[1] >= 1):
                    break
            dataset['sampleSize'] += 1
            priceRelative = record[0]/record[1]
            oldMean = dataset['mean']
            dataset['mean'] = oldMean + (priceRelative - oldMean) / dataset['sampleSize']
            dataset['Q'] = dataset['Q'] + (priceRelative - oldMean) * (priceRelative - dataset['mean'])
            dataset['variance'] = dataset['Q'] / dataset['sampleSize']
            dataset['stdDev'] = math.sqrt(dataset['variance'])
            record_str = pack('HH', int(record[0]), int(record[1]))
            self.write(record_str)
        # Phase 2: 500 records whose price relative sits 2-3 std devs above
        # the mean (first stock overpriced relative to the second).
        # NOTE(review): unlike phase 1, dataset['mean'] is never updated in
        # phases 2 and 3 (oldMean == mean), so Q grows by (pr - mean)^2.
        # Presumably this mirrors the challenge binary -- confirm before
        # "fixing".
        for i in range(0,500):
            minRange = dataset['mean'] + dataset['stdDev']*2
            maxRange = dataset['mean'] + dataset['stdDev']*3
            priceRelative = random.uniform(minRange, maxRange)
            firstStock = random.uniform(40.0, 50.0)
            secondStock = firstStock/priceRelative - .1
            record = [firstStock, secondStock]
            dataset['sampleSize'] += 1
            oldMean = dataset['mean']
            dataset['Q'] = dataset['Q'] + (priceRelative - oldMean) * (priceRelative - dataset['mean'])
            dataset['variance'] = dataset['Q'] / dataset['sampleSize']
            dataset['stdDev'] = math.sqrt(dataset['variance'])
            # NOTE(review): floats packed with 'HH' rely on struct's implicit
            # truncation (deprecated in py2, an error in py3); phase 1 casts
            # with int() explicitly.
            record_str = pack('HH', record[0], record[1])
            self.write(record_str)
        # Phase 3: same band (1-2 std devs) but the stocks are swapped,
        # so the second stock is the overpriced one.
        for i in range(0,500):
            minRange = dataset['mean'] + dataset['stdDev']
            maxRange = dataset['mean'] + dataset['stdDev']*2
            priceRelative = random.uniform(minRange, maxRange)
            firstStock = random.uniform(40.0, 50.0)
            secondStock = firstStock/priceRelative - .1
            record = [secondStock, firstStock]
            dataset['sampleSize'] += 1
            oldMean = dataset['mean']
            dataset['Q'] = dataset['Q'] + (priceRelative - oldMean) * (priceRelative - dataset['mean'])
            dataset['variance'] = dataset['Q'] / dataset['sampleSize']
            dataset['stdDev'] = math.sqrt(dataset['variance'])
            record_str = pack('HH', record[0], record[1])
            self.write(record_str)
        # Sentinel (-1, -1) terminates the stream; then expect the win banner.
        record_str = pack('hh', -1, -1)
        self.write(record_str)
        self.read(delim="\n", expect="You doubled your money!")

    def quit(self):
        pass
|
gavinmh/keras | refs/heads/master | tests/auto/test_loss_weighting.py | 31 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1336) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Activation
from keras.utils import np_utils
import unittest
nb_classes = 10
batch_size = 128
nb_epoch = 5
# Class 9 is given a 5x weight; the tests check that up-weighting it
# improves the evaluation score on test samples of that class.
weighted_class = 9
standard_weight = 1
high_weight = 5
max_train_samples = 5000
max_test_samples = 1000

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)[:max_train_samples]
X_test = X_test.reshape(10000, 784)[:max_test_samples]
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255

# convert class vectors to binary class matrices
y_train = y_train[:max_train_samples]
y_test = y_test[:max_test_samples]
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# Indices of the test samples belonging to the up-weighted class.
test_ids = np.where(y_test == np.array(weighted_class))[0]

# Per-class weights: 1 everywhere except the up-weighted class.
class_weight = dict([(i, standard_weight) for i in range(nb_classes)])
class_weight[weighted_class] = high_weight

# Per-sample weights built from the same rule.
sample_weight = np.ones((y_train.shape[0])) * standard_weight
sample_weight[y_train == weighted_class] = high_weight
def create_sequential_model():
    """Build the reference 784-50-10 MLP as a Sequential model."""
    model = Sequential()
    layers = [Dense(784, 50), Activation('relu'),
              Dense(50, 10), Activation('softmax')]
    for layer in layers:
        model.add(layer)
    return model
def create_graph_model():
    """Build the same 784-50-10 MLP as a Graph model with one input
    ('input') and one output ('output')."""
    graph = Graph()
    graph.add_input(name='input')
    graph.add_node(Dense(784, 50, activation='relu'), name='d1', input='input')
    graph.add_node(Dense(50, 10, activation='softmax'), name='d2', input='d1')
    graph.add_output(name='output', input='d2')
    return graph
def _test_weights_sequential(model, class_weight=None, sample_weight=None):
    """Fit the compiled Sequential model with the given weights, exercise the
    batch APIs, and return the evaluation score on the weighted-class ids."""
    batch_sw = sample_weight[:32] if sample_weight is not None else None
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=0, class_weight=class_weight, sample_weight=sample_weight)
    model.train_on_batch(X_train[:32], Y_train[:32],
                         class_weight=class_weight, sample_weight=batch_sw)
    model.test_on_batch(X_train[:32], Y_train[:32], sample_weight=batch_sw)
    return model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
def _test_weights_graph(model, class_weight=None, sample_weight=None):
    """Graph-model twin of _test_weights_sequential: fit with the given
    weights and return the score on the weighted-class test ids."""
    batch_sw = sample_weight[:32] if sample_weight is not None else None
    model.fit({'input': X_train, 'output': Y_train}, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0,
              class_weight={'output': class_weight},
              sample_weight={'output': sample_weight})
    model.train_on_batch({'input': X_train[:32], 'output': Y_train[:32]},
                         class_weight={'output': class_weight},
                         sample_weight={'output': batch_sw})
    model.test_on_batch({'input': X_train[:32], 'output': Y_train[:32]},
                        sample_weight={'output': batch_sw})
    return model.evaluate({'input': X_test[test_ids, :],
                           'output': Y_test[test_ids, :]}, verbose=0)
class TestLossWeighting(unittest.TestCase):
    """For each loss, check that class_weight / sample_weight improve the
    score on the up-weighted class versus an unweighted reference model."""

    def test_sequential(self):
        for loss in ['mae', 'mse', 'categorical_crossentropy']:
            print('loss:', loss)
            print('sequential')
            # no weights: reference point
            model = create_sequential_model()
            model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
            standard_score = _test_weights_sequential(model)
            # test class_weight
            model = create_sequential_model()
            model.compile(loss=loss, optimizer='rmsprop')
            score = _test_weights_sequential(model, class_weight=class_weight)
            print('score:', score, ' vs.', standard_score)
            self.assertTrue(score < standard_score)
            # test sample_weight
            model = create_sequential_model()
            model.compile(loss=loss, optimizer='rmsprop')
            score = _test_weights_sequential(model, sample_weight=sample_weight)
            print('score:', score, ' vs.', standard_score)
            self.assertTrue(score < standard_score)

    def test_graph(self):
        for loss in ['mae', 'mse', 'categorical_crossentropy']:
            print('loss:', loss)
            print('graph')
            # no weights: reference point
            model = create_graph_model()
            model.compile(loss={'output': 'categorical_crossentropy'}, optimizer='rmsprop')
            standard_score = _test_weights_graph(model)
            # test class_weight
            model = create_graph_model()
            # BUG FIX: the weighted models previously compiled with
            # 'categorical_crossentropy' regardless of `loss`, so the
            # 'mae'/'mse' loop iterations tested nothing new. Mirror
            # test_sequential and compile with the looped loss.
            model.compile(loss={'output': loss}, optimizer='rmsprop')
            score = _test_weights_graph(model, class_weight=class_weight)
            print('score:', score, ' vs.', standard_score)
            self.assertTrue(score < standard_score)
            # test sample_weight
            model = create_graph_model()
            model.compile(loss={'output': loss}, optimizer='rmsprop')
            score = _test_weights_graph(model, sample_weight=sample_weight)
            print('score:', score, ' vs.', standard_score)
            self.assertTrue(score < standard_score)
# Running this module directly executes both weighting tests (slow: trains
# several small MNIST models).
if __name__ == '__main__':
    print('Test class_weight and sample_weight')
    unittest.main()
|
talhaobject90/HUNGRY-OBJECT90 | refs/heads/master | dashboard (copy)/reviews/app.py | 49 | from django.conf.urls import url
from oscar.core.application import Application
from oscar.core.loading import get_class
class ReviewsApplication(Application):
    """Dashboard app wiring the review list/update/delete views to URLs.

    All URLs require staff access via default_permissions.
    """
    name = None
    default_permissions = ['is_staff', ]

    list_view = get_class('dashboard.reviews.views', 'ReviewListView')
    update_view = get_class('dashboard.reviews.views', 'ReviewUpdateView')
    delete_view = get_class('dashboard.reviews.views', 'ReviewDeleteView')

    def get_urls(self):
        """Return the URL patterns for the reviews dashboard."""
        urlpatterns = [
            url(r'^$', self.list_view.as_view(), name='reviews-list'),
            url(r'^(?P<pk>\d+)/$', self.update_view.as_view(),
                name='reviews-update'),
            url(r'^(?P<pk>\d+)/delete/$', self.delete_view.as_view(),
                name='reviews-delete'),
        ]
        return self.post_process_urls(urlpatterns)


application = ReviewsApplication()
|
liquex/gameone.de-on-XBMC | refs/heads/master | default.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, re, sys
import urllib, urllib2, HTMLParser
# BUG FIX: `xbmc` was never imported although it is used throughout the
# module (xbmc.translatePath below, xbmc.log / xbmc.LOG* constants in
# log()), which raised NameError as soon as the add-on was loaded.
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
import xml.etree.ElementTree as ET

# Handle Kodi passes to the plugin for directory/item callbacks.
pluginhandle = int(sys.argv[1])
addon = xbmcaddon.Addon()
pluginid = addon.getAddonInfo('id')
translation = addon.getLocalizedString
path_plugin = xbmc.translatePath(addon.getAddonInfo('path')).decode("utf-8")
path_icon = path_plugin + '/icon.png'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'
# Scraping endpoints of gameone.de.
url_base = 'http://www.gameone.de'
url_year = url_base + '/tv/year/'
url_podcast = url_base + '/feed/podcast.xml'
url_episode = 'http://www.gameone.de/api/mrss/mgid%3Agameone%3Avideo%3Amtvnn.com%3Atv_show-'
url_swf = 'http://www.gameone.de/flash/g2player_2.1.9.beta3.swf'
def log(message, lvl=xbmc.LOGNOTICE):
    ''' Write *message* to the Kodi log, prefixed with this add-on's id. '''
    prefixed = (pluginid + ': %s' % message).encode('utf-8')
    xbmc.log(msg=prefixed, level=lvl)
def parse_html_string(string):
    ''' Decode HTML entities (e.g. &amp;) contained in *string*. '''
    return HTMLParser.HTMLParser().unescape(string)
def build_url(string):
    ''' Build a plugin:// callback URL from a dict of parameters. '''
    query = urllib.urlencode(string)
    return sys.argv[0] + '?' + query
def get_parameters(string):
    ''' Convert parameters in a URL (or a bare query string) to a dict.

    Accepts either a string starting with '?' or a full URL containing
    exactly one '?'.  Returns an empty dict when nothing parses.
    '''
    parameters = {}
    if not string:
        return parameters
    if string[:1] == '?':
        query = string[1:]
    else:
        url_split = string.split('?')
        if len(url_split) != 2:
            # BUG FIX: this branch previously referenced a variable that
            # was never assigned here (NameError) and concatenated an int
            # into the message (TypeError).
            log('Couldn\'t split parameters correctly (wrong amount of array elements) [Elements: ' + str(len(url_split)) + ' | String:' + string + ']', xbmc.LOGERROR)
            return parameters
        query = url_split[1]
    # Both input shapes share one key=value parsing loop (the original
    # duplicated it verbatim).
    for pair in query.split('&'):
        parameter_split = pair.split('=')
        if len(parameter_split) == 2:
            parameters[parameter_split[0]] = urllib.unquote(parameter_split[1])
        else:
            # BUG FIX: str() around len() — the original raised TypeError
            # (cannot concatenate 'str' and 'int') instead of logging.
            log('Couldn\'t split parameters correctly (wrong amount of array elements) [Elements: ' + str(len(parameter_split)) + ' | String:' + string + ']', xbmc.LOGERROR)
    return parameters
def parse_content(string, pattern=False, dotall=False):
    ''' Extract contents with regex from either a website or a comitted string. If no regex pattern is passed, the whole content is returned. '''
    log('Start parsing content...', xbmc.LOGDEBUG)
    # Heuristic: treat the argument as a URL when it starts with a
    # scheme or with 'www.'; otherwise it is already page content.
    looks_like_url = (len(re.findall('http[s]?://', string[:8])) >= 1
                      or string[:4] == 'www.')
    if looks_like_url:
        log('URL passed, scraping from URL: ' + string, xbmc.LOGDEBUG)
        request = urllib2.Request(string)
        request.add_header('User-Agent', user_agent)
        handle = urllib2.urlopen(request)
        content = handle.read()
        handle.close()
        if isinstance(content, str):
            content = content.decode('utf-8')
    else:
        log('Content passed, skip scraping...', xbmc.LOGDEBUG)
        content = string
    if pattern == False:
        log('No expression found, returning whole content.', xbmc.LOGDEBUG)
        return content
    log('Expression: ' + str(pattern), xbmc.LOGDEBUG)
    flags = re.DOTALL if dotall == True else 0
    matches = re.compile(pattern, flags).findall(content)
    log(str(len(matches)) + ' matches', xbmc.LOGDEBUG)
    return matches
def add_menu_item(type, name, url, mode, thumbIMG='', fanart=''):
    ''' Add an item to the XBMC GUI.

    type     -- one of ITEMTYPE_DIRECTORY / ITEMTYPE_VIDEO /
                ITEMTYPE_DUMMY_DIR / ITEMTYPE_DUMMY_VID
    name     -- display label (HTML entities are decoded)
    url      -- value passed back to this plugin via the 'url' parameter
    mode     -- name of the plugin_structure method to dispatch to
    thumbIMG -- optional thumbnail; falls back to the add-on icon when
                the 'showlogo' setting is enabled
    fanart   -- optional fanart image
    '''
    if not thumbIMG and addon.getSetting(id='showlogo') == 'true':
        thumbIMG = path_icon
    name = parse_html_string(name)
    if (type == 'ITEMTYPE_DIRECTORY' or type == 'ITEMTYPE_DUMMY_DIR'):
        iconIMG = 'DefaultFolder.png'
    elif (type == 'ITEMTYPE_VIDEO' or type == 'ITEMTYPE_DUMMY_VID'):
        iconIMG = 'DefaultVideo.png'
    else:
        # BUG FIX: an unrecognized type previously left iconIMG unbound
        # and raised UnboundLocalError below; default to a folder icon.
        iconIMG = 'DefaultFolder.png'
    list_item = xbmcgui.ListItem(name, iconImage=iconIMG, thumbnailImage=thumbIMG)
    #list_item.setInfo( type="Video", infoLabels={ "Title": name } )
    list_item.setProperty('fanart_image', fanart)
    if (type == 'ITEMTYPE_VIDEO'):
        list_item.setProperty('Video', 'true')
        list_item.setProperty('IsPlayable', 'true')
    # Re-encode the callback so Kodi invokes this plugin with mode + url.
    url = sys.argv[0] + '?mode=' + str(mode) + '&url=' + urllib.quote_plus(url)
    if (type == 'ITEMTYPE_DIRECTORY'):
        return xbmcplugin.addDirectoryItem(handle=pluginhandle, url=url, listitem=list_item, isFolder=True)
    else:
        return xbmcplugin.addDirectoryItem(handle=pluginhandle, url=url, listitem=list_item)
class plugin_structure():
    ''' Dispatcher: each public method renders one screen of the add-on.

    NOTE(review): every method reads the module-level global ``url``
    (populated from the plugin call parameters at the bottom of the
    file) rather than taking it as an argument — confirm before
    refactoring call sites.
    '''

    def show_menu_root(self):
        ''' Top-level menu: TV, blog, podcasts and (optionally) settings. '''
        add_menu_item('ITEMTYPE_DIRECTORY', translation(30101), url_base + '/tv', 'show_menu_tv')
        add_menu_item('ITEMTYPE_DIRECTORY', translation(30102), url_base + '/blog', 'show_menu_blog')
        add_menu_item('ITEMTYPE_DIRECTORY', translation(30104), url_podcast, 'show_menu_podcasts')
        if addon.getSetting(id='showsettings') == 'true':
            add_menu_item('ITEMTYPE_DUMMY_DIR', translation(30100), '', 'show_settings')
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    def show_settings(self):
        ''' Open this add-on's settings dialog. '''
        return xbmc.executebuiltin('Addon.OpenSettings(' + pluginid + ')')

    #CATEGORY: TV
    def show_menu_tv(self):
        ''' List the years for which TV episodes exist. '''
        log('Indexing years of TV episodes', xbmc.LOGDEBUG)
        match_years = parse_content(url, '<a href="/tv\?year=([0-9]{4})">[0-9]{4}</a>', True)
        for year in match_years:
            add_menu_item('ITEMTYPE_DIRECTORY', year, url_year + year, 'show_menu_tv_episodes')
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    def show_menu_tv_episodes(self):
        ''' List all TV episodes of the selected year. '''
        log('Indexing TV episodes: ' + url, xbmc.LOGDEBUG)
        # Captures (episode number, thumbnail URL, title) per episode.
        match_episodes = parse_content(url, '''<a href="/tv/([0-9]+)" class="image_link"><img.+?/><noscript><img src="(.+?)".+?/></noscript></a>\n<h5>\n<a href=\'.+?\' title=['"](.+?)['"]>''', True)
        for episode,thumbnail,title in match_episodes:
            title = translation(30002) + ' ' + episode + ' - ' + title
            add_menu_item('ITEMTYPE_VIDEO', title, url_episode + episode, 'play_tv_episode', thumbnail)
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    def play_tv_episode(self):
        ''' Resolve a TV episode to a playable RTMP stream URL. '''
        log('Playing TV episode: ' + url, xbmc.LOGNOTICE)
        match_video_xml = parse_content(url, "<media:content.+?url='(.+?)'></media:content>")
        for video_xml_url in match_video_xml:
            match_video = str(parse_content(video_xml_url))
            xml_root = ET.fromstring(match_video)
            # Map rendition height (pixels) -> stream source URL.
            dict_resolutions = {}
            for stream in xml_root.findall('./video/item/rendition'):
                dict_resolutions[int(stream.attrib.get('height'))] = stream.find('src').text
            log('Selecting stream quality: ' + str(resolutions.SETTINGS.get(int(addon.getSetting(id='videoquality')))), xbmc.LOGDEBUG)
            video_file = dict_resolutions.get(resolutions.SETTINGS.get(int(addon.getSetting(id='videoquality'))))
            if video_file == None:
                # Configured quality not offered by the feed: take the best.
                log('Couldn\'t select stream quality: ' + str(resolutions.SETTINGS.get(int(addon.getSetting(id='videoquality')))) + ', falling back to highest.', xbmc.LOGNOTICE)
                video_file = dict_resolutions[max(dict_resolutions)]
            # Append the SWF-verification parameters required by the CDN.
            video_url = video_file + ' swfurl=' + url_swf + ' swfvfy=true' + ' pageUrl=www.gameone.de app=ondemand?ovpfv=2.1.4'
            item = xbmcgui.ListItem(path=video_url)
            # Only the first media:content match is played.
            return xbmcplugin.setResolvedUrl(pluginhandle, True, item)
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    #CATEGORY: BLOG
    def show_menu_blog(self):
        ''' List the blog categories plus an entry for all posts. '''
        log('Indexing blog categories: ' + url, xbmc.LOGDEBUG)
        add_menu_item('ITEMTYPE_DIRECTORY', translation(30200), url, 'show_menu_blog_entries')
        match_teasers = parse_content(url, '<ul class="teasers">(.+?)</ul>', True)
        for teaser in match_teasers:
            match_categories = parse_content(teaser, '<a title="(.+?)" href="(.+?)">.+?<img.+?src="(.+?)"', True)
            for category,url_category,thumbnail in match_categories:
                add_menu_item('ITEMTYPE_DIRECTORY', category, url_base + url_category, 'show_menu_blog_entries', thumbnail)
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    def show_menu_blog_entries(self):
        ''' List blog posts; posts with one inline video become playable
        items, multi-video or multi-page posts become sub-directories. '''
        log('Indexing blog entries: ' + url, xbmc.LOGDEBUG)
        match_posts = parse_content(url, '<li class="post teaser_box teaser".+?<div class=\'overlay\'.+?<a href="(.+?)">(.+?)</a>.+?<a class=\'image_link\' href=\'.+?\'>\n<img .+?src="(.+?)"', True)
        match_next = parse_content(url, '<a class="next_page" rel="next" href="(.+?)">', True)
        for post_url,title,thumbnail in match_posts:
            # Fetch the post itself to count embedded players and pages.
            match_content = parse_content(url_base + post_url)
            match_videoposts = parse_content(match_content, '<div class="player_swf".+?', True)
            match_blogpages = parse_content(match_content, '<a class="forwards" href="(.+?)">', True)
            video_amount = len(match_videoposts)
            pages_amount = len(match_blogpages)
            if video_amount == 1:
                if pages_amount == 0:
                    # Exactly one video, one page: resolve it directly.
                    match_video_id = parse_content(match_content, 'video_meta-(.+?)"')
                    add_menu_item('ITEMTYPE_VIDEO', title, match_video_id[0], 'play_blog_video', thumbnail)
                else:
                    add_menu_item('ITEMTYPE_DIRECTORY', title, url_base + post_url, 'show_menu_blog_videos', thumbnail)
            elif video_amount>1:
                add_menu_item('ITEMTYPE_DIRECTORY', title, url_base + post_url, 'show_menu_blog_videos', thumbnail)
        for url_next in match_next:
            # Pagination: link to the next page of blog entries.
            add_menu_item('ITEMTYPE_DIRECTORY', translation(30001), url_base + url_next, 'show_menu_blog_entries')
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    def show_menu_blog_videos(self):
        ''' List all videos embedded in one (possibly multi-page) blog post. '''
        log('Indexing blog videos: ' + url, xbmc.LOGDEBUG)
        match_content = parse_content(url)
        match_video = parse_content(match_content, 'video_meta-(.+?)"')
        match_thumb = parse_content(match_content, '"image", "(.+?)"', True)
        match_title = parse_content(match_content, '<p><strong>(.+?)</strong>', True)
        match_next = parse_content(match_content, '<a class="forwards" href="(.+?)">')
        i = 0
        for video_id in match_video:
            # Titles may be fewer than videos; fall back to a generic label.
            try: title = match_title[i]
            except: title = translation(30003)
            if title[-1:] == ':':
                title = title[:-1]
            add_menu_item('ITEMTYPE_VIDEO', title, video_id, 'play_blog_video', match_thumb[i])
            i = i + 1
        for url_next in match_next:
            add_menu_item('ITEMTYPE_DIRECTORY', translation(30001), url_next, 'show_menu_blog_videos')
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    def play_blog_video(self):
        ''' Resolve and play a blog video by its riptide video id. '''
        log('Playing blog video: ' + url, xbmc.LOGNOTICE)
        url_video = self.get_video(url)
        item = xbmcgui.ListItem(path=url_video)
        xbmcplugin.setResolvedUrl(pluginhandle, True, item)
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    #CATEGORY: PODCASTS
    def show_menu_podcasts(self):
        ''' List the podcast feed entries as directly playable items. '''
        log('Indexing podcasts: ' + url)
        match_content = parse_content(url, '</image>.+?</rss>', True)[0]
        match_podcasts = parse_content(match_content, '<title>(.+?)</title>.+?<feedburner:origLink>(.+?)</feedburner:origLink>', True)
        for title,url_podcast in match_podcasts:
            add_menu_item('ITEMTYPE_VIDEO', title, url_podcast, 'play_media')
        xbmcplugin.endOfDirectory(handle=pluginhandle)

    #GENERAL FUNCTIONS:
    def play_media(self, url_media=''):
        ''' Resolve a plain media URL (defaults to the global ``url``). '''
        log('Playing media: ' + url_media, xbmc.LOGNOTICE)
        if not url_media:
            url_media = url
        item = xbmcgui.ListItem(path=url_media)
        xbmcplugin.setResolvedUrl(pluginhandle, True, item)
        #xbmc.Player().play(url_media, item)

    def get_video(self, video_id):
        ''' Look up the mediagen XML for *video_id* and return the RTMP
        stream URL for the configured quality (highest as fallback).

        NOTE(review): this duplicates the quality-selection logic in
        play_tv_episode; keep the two in sync.
        '''
        log('Scraping video ID: ' + url, xbmc.LOGDEBUG)
        match_video = str(parse_content('http://riptide.mtvnn.com/mediagen/' + video_id))
        xml_root = ET.fromstring(match_video)
        dict_resolutions = {}
        for stream in xml_root.findall('./video/item/rendition'):
            dict_resolutions[int(stream.attrib.get('height'))] = stream.find('src').text
        log('Selecting stream quality: ' + str(resolutions.SETTINGS.get(int(addon.getSetting(id='videoquality')))), xbmc.LOGDEBUG)
        video_file = dict_resolutions.get(resolutions.SETTINGS.get(int(addon.getSetting(id='videoquality'))))
        if video_file == None:
            log('Couldn\'t select stream quality: ' + str(resolutions.SETTINGS.get(int(addon.getSetting(id='videoquality')))) + ', falling back to highest.', xbmc.LOGNOTICE)
            video_file = dict_resolutions[max(dict_resolutions)]
        return video_file + ' swfurl=' + url_swf + ' swfvfy=true' + ' pageUrl=www.gameone.de app=ondemand?ovpfv=2.1.4'
class resolutions():
    ''' Namespace mapping the add-on 'videoquality' setting index to a
    stream height in pixels. This class shouldn't be instantiated. '''
    SETTINGS = {
        0: 270,  # low
        1: 360,  # medium
        2: 720,  # high
    }
# Get parameters
parameters = get_parameters(sys.argv[2])
url = parameters.get('url')          # consumed as a global by plugin_structure
mode = parameters.get('mode')        # name of the plugin_structure method to call
navigate = plugin_structure()
if not sys.argv[2]:
    # No parameters: first invocation, show the root menu.
    navigate.show_menu_root()
else:
    try:
        # mode may carry an argument after '!', e.g. 'play_media!<arg>'.
        mode_splitted = mode.split('!')
        call_func = getattr(navigate,mode_splitted[0])
        try:
            call_func(mode_splitted[1])
        except:
            # NOTE(review): this bare except silently retries without the
            # argument even when call_func(arg) failed internally for an
            # unrelated reason — presumably it was meant to catch only
            # IndexError/TypeError; confirm before narrowing.
            call_func()
    except:
        # NOTE(review): bare except also hides programming errors here.
        log('Error: Failed executing function! [Mode: ' + mode + ']', xbmc.LOGERROR)
|
Mirantis/swift-encrypt | refs/heads/master | swift/account/server.py | 2 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import time
import traceback
from xml.sax import saxutils
from eventlet import Timeout
import swift.common.db
from swift.common.db import AccountBroker
from swift.common.utils import get_logger, get_param, hash_path, public, \
normalize_timestamp, storage_directory, config_true_value, \
validate_device_partition, json, timing_stats
from swift.common.constraints import ACCOUNT_LISTING_LIMIT, \
check_mount, check_float, check_utf8, FORMAT2CONTENT_TYPE
from swift.common.db_replicator import ReplicatorRpc
from swift.common.swob import HTTPAccepted, HTTPBadRequest, \
HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPConflict, Request, Response, \
HTTPInsufficientStorage, HTTPNotAcceptable
# Name of the on-disk subdirectory (under each device) holding account DBs.
DATADIR = 'accounts'
class AccountController(object):
    """WSGI controller for the account server."""

    def __init__(self, conf):
        # All log output is routed through the 'account-server' log route.
        self.logger = get_logger(conf, log_route='account-server')
        self.root = conf.get('devices', '/srv/node')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        # RPC endpoint used by the REPLICATE verb below.
        self.replicator_rpc = ReplicatorRpc(self.root, DATADIR, AccountBroker,
                                            self.mount_check,
                                            logger=self.logger)
        self.auto_create_account_prefix = \
            conf.get('auto_create_account_prefix') or '.'
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))

    def _get_account_broker(self, drive, part, account):
        # Derive the sqlite DB path for this account from its hash and
        # partition, then return a broker wrapping it.
        hsh = hash_path(account)
        db_dir = storage_directory(DATADIR, part, hsh)
        db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
        return AccountBroker(db_path, account=account, logger=self.logger)

    @public
    @timing_stats()
    def DELETE(self, req):
        """Handle HTTP DELETE request."""
        try:
            drive, part, account = req.split_path(3)
            validate_device_partition(drive, part)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        # A valid x-timestamp is mandatory: deletes are timestamp-ordered.
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        broker = self._get_account_broker(drive, part, account)
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        broker.delete_db(req.headers['x-timestamp'])
        return HTTPNoContent(request=req)

    @public
    @timing_stats()
    def PUT(self, req):
        """Handle HTTP PUT request.

        With 4 path components this records a container within the
        account; with 3 it creates/updates the account itself.
        """
        try:
            drive, part, account, container = req.split_path(3, 4)
            validate_device_partition(drive, part)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        broker = self._get_account_broker(drive, part, account)
        if container:   # put account container
            if 'x-trans-id' in req.headers:
                # Shorter pending timeout for user-facing (proxied) requests.
                broker.pending_timeout = 3
            # Auto-create system accounts (e.g. '.expiring_objects') on demand.
            if account.startswith(self.auto_create_account_prefix) and \
                    not os.path.exists(broker.db_file):
                broker.initialize(normalize_timestamp(
                    req.headers.get('x-timestamp') or time.time()))
            if req.headers.get('x-account-override-deleted', 'no').lower() != \
                    'yes' and broker.is_deleted():
                return HTTPNotFound(request=req)
            broker.put_container(container, req.headers['x-put-timestamp'],
                                 req.headers['x-delete-timestamp'],
                                 req.headers['x-object-count'],
                                 req.headers['x-bytes-used'])
            # A newer delete than put means the container record is a delete.
            if req.headers['x-delete-timestamp'] > \
                    req.headers['x-put-timestamp']:
                return HTTPNoContent(request=req)
            else:
                return HTTPCreated(request=req)
        else:   # put account
            timestamp = normalize_timestamp(req.headers['x-timestamp'])
            if not os.path.exists(broker.db_file):
                broker.initialize(timestamp)
                created = True
            elif broker.is_status_deleted():
                # DB exists but is marked deleted and not yet reaped.
                return HTTPForbidden(request=req, body='Recently deleted')
            else:
                created = broker.is_deleted()
                broker.update_put_timestamp(timestamp)
                if broker.is_deleted():
                    return HTTPConflict(request=req)
            # Persist any x-account-meta-* headers, stamped with this PUT.
            metadata = {}
            metadata.update((key, (value, timestamp))
                            for key, value in req.headers.iteritems()
                            if key.lower().startswith('x-account-meta-'))
            if metadata:
                broker.update_metadata(metadata)
            if created:
                return HTTPCreated(request=req)
            else:
                return HTTPAccepted(request=req)

    @public
    @timing_stats()
    def HEAD(self, req):
        """Handle HTTP HEAD request."""
        try:
            drive, part, account = req.split_path(3)
            validate_device_partition(drive, part)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        broker = self._get_account_broker(drive, part, account)
        # Reads tolerate a slightly stale DB to avoid blocking on writers.
        broker.pending_timeout = 0.1
        broker.stale_reads_ok = True
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        info = broker.get_info()
        headers = {
            'X-Account-Container-Count': info['container_count'],
            'X-Account-Object-Count': info['object_count'],
            'X-Account-Bytes-Used': info['bytes_used'],
            'X-Timestamp': info['created_at'],
            'X-PUT-Timestamp': info['put_timestamp']}
        # Expose stored account metadata (empty values are tombstones).
        headers.update((key, value)
                       for key, (value, timestamp) in
                       broker.metadata.iteritems() if value != '')
        # ?format= overrides the Accept header for content negotiation.
        if get_param(req, 'format'):
            req.accept = FORMAT2CONTENT_TYPE.get(
                get_param(req, 'format').lower(), FORMAT2CONTENT_TYPE['plain'])
        headers['Content-Type'] = req.accept.best_match(
            ['text/plain', 'application/json', 'application/xml', 'text/xml'])
        if not headers['Content-Type']:
            return HTTPNotAcceptable(request=req)
        return HTTPNoContent(request=req, headers=headers, charset='utf-8')

    @public
    @timing_stats()
    def GET(self, req):
        """Handle HTTP GET request.

        Returns the container listing for the account, rendered as
        plain text, JSON or XML depending on content negotiation.
        """
        try:
            drive, part, account = req.split_path(3)
            validate_device_partition(drive, part)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        broker = self._get_account_broker(drive, part, account)
        broker.pending_timeout = 0.1
        broker.stale_reads_ok = True
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        info = broker.get_info()
        resp_headers = {
            'X-Account-Container-Count': info['container_count'],
            'X-Account-Object-Count': info['object_count'],
            'X-Account-Bytes-Used': info['bytes_used'],
            'X-Timestamp': info['created_at'],
            'X-PUT-Timestamp': info['put_timestamp']}
        resp_headers.update((key, value)
                            for key, (value, timestamp) in
                            broker.metadata.iteritems() if value != '')
        # Parse and validate the listing query parameters.
        try:
            prefix = get_param(req, 'prefix')
            delimiter = get_param(req, 'delimiter')
            if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
                # delimiters can be made more flexible later
                return HTTPPreconditionFailed(body='Bad delimiter')
            limit = ACCOUNT_LISTING_LIMIT
            given_limit = get_param(req, 'limit')
            if given_limit and given_limit.isdigit():
                limit = int(given_limit)
                if limit > ACCOUNT_LISTING_LIMIT:
                    return HTTPPreconditionFailed(request=req,
                                                  body='Maximum limit is %d' %
                                                  ACCOUNT_LISTING_LIMIT)
            marker = get_param(req, 'marker', '')
            end_marker = get_param(req, 'end_marker')
            query_format = get_param(req, 'format')
        except UnicodeDecodeError, err:
            return HTTPBadRequest(body='parameters not utf8',
                                  content_type='text/plain', request=req)
        if query_format:
            req.accept = FORMAT2CONTENT_TYPE.get(query_format.lower(),
                                                 FORMAT2CONTENT_TYPE['plain'])
        out_content_type = req.accept.best_match(
            ['text/plain', 'application/json', 'application/xml', 'text/xml'])
        if not out_content_type:
            return HTTPNotAcceptable(request=req)
        account_list = broker.list_containers_iter(limit, marker, end_marker,
                                                   prefix, delimiter)
        if out_content_type == 'application/json':
            data = []
            for (name, object_count, bytes_used, is_subdir) in account_list:
                if is_subdir:
                    data.append({'subdir': name})
                else:
                    data.append({'name': name, 'count': object_count,
                                 'bytes': bytes_used})
            account_list = json.dumps(data)
        elif out_content_type.endswith('/xml'):
            output_list = ['<?xml version="1.0" encoding="UTF-8"?>',
                           '<account name="%s">' % account]
            for (name, object_count, bytes_used, is_subdir) in account_list:
                # Escape container names for XML output.
                name = saxutils.escape(name)
                if is_subdir:
                    output_list.append('<subdir name="%s" />' % name)
                else:
                    item = '<container><name>%s</name><count>%s</count>' \
                           '<bytes>%s</bytes></container>' % \
                           (name, object_count, bytes_used)
                    output_list.append(item)
            output_list.append('</account>')
            account_list = '\n'.join(output_list)
        else:
            # Plain text: 204 for an empty listing, newline-separated names
            # otherwise.
            if not account_list:
                return HTTPNoContent(request=req, headers=resp_headers)
            account_list = '\n'.join(r[0] for r in account_list) + '\n'
        ret = Response(body=account_list, request=req, headers=resp_headers)
        ret.content_type = out_content_type
        ret.charset = 'utf-8'
        return ret

    @public
    @timing_stats()
    def REPLICATE(self, req):
        """
        Handle HTTP REPLICATE request.
        Handler for RPC calls for account replication.
        """
        try:
            post_args = req.split_path(3)
            # NOTE(review): `hash` shadows the builtin of the same name.
            drive, partition, hash = post_args
            validate_device_partition(drive, partition)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        try:
            # The RPC arguments arrive as a JSON body.
            args = json.load(req.environ['wsgi.input'])
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain')
        ret = self.replicator_rpc.dispatch(post_args, args)
        ret.request = req
        return ret

    @public
    @timing_stats()
    def POST(self, req):
        """Handle HTTP POST request (account metadata update)."""
        try:
            drive, part, account = req.split_path(3)
            validate_device_partition(drive, part)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing or bad timestamp',
                                  request=req,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        broker = self._get_account_broker(drive, part, account)
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        timestamp = normalize_timestamp(req.headers['x-timestamp'])
        metadata = {}
        metadata.update((key, (value, timestamp))
                        for key, value in req.headers.iteritems()
                        if key.lower().startswith('x-account-meta-'))
        if metadata:
            broker.update_metadata(metadata)
        return HTTPNoContent(request=req)

    def __call__(self, env, start_response):
        # WSGI entry point: dispatch by HTTP verb, log every request.
        start_time = time.time()
        req = Request(env)
        self.logger.txn_id = req.headers.get('x-trans-id', None)
        if not check_utf8(req.path_info):
            res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
        else:
            try:
                # disallow methods which are not publicly accessible
                try:
                    method = getattr(self, req.method)
                    # Only methods decorated with @public carry this attr.
                    getattr(method, 'publicly_accessible')
                except AttributeError:
                    res = HTTPMethodNotAllowed()
                else:
                    res = method(req)
            except (Exception, Timeout):
                self.logger.exception(_('ERROR __call__ error with %(method)s'
                                        ' %(path)s '),
                                      {'method': req.method, 'path': req.path})
                res = HTTPInternalServerError(body=traceback.format_exc())
        trans_time = '%.4f' % (time.time() - start_time)
        additional_info = ''
        if res.headers.get('x-container-timestamp') is not None:
            additional_info += 'x-container-timestamp: %s' % \
                res.headers['x-container-timestamp']
        # Apache-style access log line.
        log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s "%s"' % (
            req.remote_addr,
            time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()),
            req.method, req.path,
            res.status.split()[0], res.content_length or '-',
            req.headers.get('x-trans-id', '-'),
            req.referer or '-', req.user_agent or '-',
            trans_time,
            additional_info)
        # Replication traffic is chatty; demote it to debug level.
        if req.method.upper() == 'REPLICATE':
            self.logger.debug(log_message)
        else:
            self.logger.info(log_message)
        return res(env, start_response)
def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI account server apps"""
    # Local settings take precedence over the global paste config.
    merged_conf = dict(global_conf)
    merged_conf.update(local_conf)
    return AccountController(merged_conf)
|
scottdangelo/RemoveVolumeMangerLocks | refs/heads/RemoveVolumeManagerLocks | cinder/api/v2/types.py | 17 | # Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume type & volume types extra specs extension."""
from oslo_utils import strutils
from webob import exc
from cinder.api.openstack import wsgi
from cinder.api.views import types as views_types
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import volume_types
def make_voltype(elem):
    """Attach the standard volume-type attributes and the flattened
    extra_specs sub-element to an XML template element."""
    for attribute in ('id', 'name', 'description'):
        elem.set(attribute)
    elem.append(xmlutil.make_flat_dict('extra_specs', selector='extra_specs'))
class VolumeTypeTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single volume type."""

    def construct(self):
        volume_type = xmlutil.TemplateElement('volume_type',
                                              selector='volume_type')
        make_voltype(volume_type)
        return xmlutil.MasterTemplate(volume_type, 1)
class VolumeTypesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of volume types."""

    def construct(self):
        container = xmlutil.TemplateElement('volume_types')
        entry = xmlutil.SubTemplateElement(container, 'volume_type',
                                           selector='volume_types')
        make_voltype(entry)
        return xmlutil.MasterTemplate(container, 1)
class VolumeTypesController(wsgi.Controller):
    """The volume types API controller for the OpenStack API."""

    _view_builder_class = views_types.ViewBuilder

    @wsgi.serializers(xml=VolumeTypesTemplate)
    def index(self, req):
        """Returns the list of volume types."""
        limited_types = self._get_volume_types(req)
        # Cache the raw list so downstream middleware can reuse it.
        req.cache_resource(limited_types, name='types')
        return self._view_builder.index(req, limited_types)

    @wsgi.serializers(xml=VolumeTypeTemplate)
    def show(self, req, id):
        """Return a single volume type item.

        The special id 'default' resolves to the configured default
        volume type instead of a lookup by id.
        """
        context = req.environ['cinder.context']
        # get default volume type
        if id is not None and id == 'default':
            vol_type = volume_types.get_default_volume_type()
            if not vol_type:
                msg = _("Default volume type can not be found.")
                raise exc.HTTPNotFound(explanation=msg)
            req.cache_resource(vol_type, name='types')
        else:
            try:
                vol_type = volume_types.get_volume_type(context, id)
                req.cache_resource(vol_type, name='types')
            except exception.VolumeTypeNotFound as error:
                # Translate the internal exception into an HTTP 404.
                raise exc.HTTPNotFound(explanation=error.msg)
        return self._view_builder.show(req, vol_type)

    def _parse_is_public(self, is_public):
        """Parse is_public into something usable.
        * True: List public volume types only
        * False: List private volume types only
        * None: List both public and private volume types
        """
        if is_public is None:
            # preserve default value of showing only public types
            return True
        elif utils.is_none_string(is_public):
            # The literal string 'none'/'null' means "no filter".
            return None
        else:
            try:
                return strutils.bool_from_string(is_public, strict=True)
            except ValueError:
                msg = _('Invalid is_public filter [%s]') % is_public
                raise exc.HTTPBadRequest(explanation=msg)

    def _get_volume_types(self, req):
        """Helper function that returns a list of type dicts."""
        filters = {}
        context = req.environ['cinder.context']
        if context.is_admin:
            # Only admin has query access to all volume types
            filters['is_public'] = self._parse_is_public(
                req.params.get('is_public', None))
        else:
            # Non-admin callers may only ever see public types.
            filters['is_public'] = True
        limited_types = volume_types.get_all_types(
            context, search_opts=filters).values()
        return limited_types
def create_resource():
    """Return the volume-types controller wrapped as a WSGI resource."""
    return wsgi.Resource(VolumeTypesController())
|
Yukarumya/Yukarum-Redfoxes | refs/heads/master | python/mozbuild/mozbuild/action/test_archive.py | 1 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This action is used to produce test archives.
#
# Ideally, the data in this file should be defined in moz.build files.
# It is defined inline because this was easiest to make test archive
# generation faster.
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import itertools
import os
import sys
import time
from manifestparser import TestManifest
from reftest import ReftestManifest
from mozbuild.util import ensureParentDir
from mozpack.files import FileFinder
from mozpack.mozjar import JarWriter
import mozpack.path as mozpath
import buildconfig
# Staging directory the build populates; most archive entries copy from here.
STAGE = mozpath.join(buildconfig.topobjdir, 'dist', 'test-stage')

# Executables from dist/bin required by the test harnesses
# (BIN_SUFFIX is appended per-platform when the patterns are built).
TEST_HARNESS_BINS = [
    'BadCertServer',
    'GenerateOCSPResponse',
    'OCSPStaplingServer',
    'SmokeDMD',
    'certutil',
    'crashinject',
    'fileid',
    'minidumpwriter',
    'pk12util',
    'screenshot',
    'screentopng',
    'ssltunnel',
    'xpcshell',
]

# Shared libraries the harness binaries need
# (DLL_PREFIX/DLL_SUFFIX applied per-platform).
# The fileid utility depends on mozglue. See bug 1069556.
TEST_HARNESS_DLLS = [
    'crashinjectdll',
    'mozglue'
]

# NPAPI test plugin shared libraries.
TEST_PLUGIN_DLLS = [
    'npsecondtest',
    'npswftest',
    'nptest',
    'nptestjava',
    'npthirdtest',
]

# Bundle-style test plugin directories (globs under dist/plugins).
TEST_PLUGIN_DIRS = [
    'JavaTest.plugin/**',
    'SecondTest.plugin/**',
    'Test.plugin/**',
    'ThirdTest.plugin/**',
    'npswftest.plugin/**',
]

# Gecko Media Plugin test plugin directories (globs under dist/bin).
GMP_TEST_PLUGIN_DIRS = [
    'gmp-clearkey/**',
    'gmp-fake/**',
    'gmp-fakeopenh264/**',
]
ARCHIVE_FILES = {
'common': [
{
'source': STAGE,
'base': '',
'pattern': '**',
'ignore': [
'cppunittest/**',
'gtest/**',
'mochitest/**',
'reftest/**',
'talos/**',
'web-platform/**',
'xpcshell/**',
],
},
{
'source': buildconfig.topobjdir,
'base': '_tests',
'pattern': 'modules/**',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing/marionette',
'patterns': [
'client/**',
'harness/**',
'puppeteer/**',
'mach_test_package_commands.py',
],
'dest': 'marionette',
'ignore': [
'client/docs',
'harness/marionette_harness/tests',
'puppeteer/firefox/docs',
],
},
{
'source': buildconfig.topsrcdir,
'base': '',
'manifests': [
'testing/marionette/harness/marionette_harness/tests/unit-tests.ini',
'testing/marionette/harness/marionette_harness/tests/webapi-tests.ini',
],
# We also need the manifests and harness_unit tests
'pattern': 'testing/marionette/harness/marionette_harness/tests/**',
'dest': 'marionette/tests',
},
{
'source': buildconfig.topobjdir,
'base': '_tests',
'pattern': 'mozbase/**',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'firefox-ui/**',
},
{
'source': buildconfig.topsrcdir,
'base': 'dom/media/test/external',
'pattern': '**',
'dest': 'external-media-tests',
},
{
'source': buildconfig.topsrcdir,
'base': 'js/src',
'pattern': 'jit-test/**',
'dest': 'jit-test',
},
{
'source': buildconfig.topsrcdir,
'base': 'js/src/tests',
'pattern': 'ecma_6/**',
'dest': 'jit-test/tests',
},
{
'source': buildconfig.topsrcdir,
'base': 'js/src/tests',
'pattern': 'js1_8_5/**',
'dest': 'jit-test/tests',
},
{
'source': buildconfig.topsrcdir,
'base': 'js/src/tests',
'pattern': 'lib/**',
'dest': 'jit-test/tests',
},
{
'source': buildconfig.topsrcdir,
'base': 'js/src',
'pattern': 'jsapi.h',
'dest': 'jit-test',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'tps/**',
},
{
'source': buildconfig.topsrcdir,
'base': 'services/sync/',
'pattern': 'tps/**',
},
{
'source': buildconfig.topsrcdir,
'base': 'services/sync/tests/tps',
'pattern': '**',
'dest': 'tps/tests',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing/web-platform/tests/tools/wptserve',
'pattern': '**',
'dest': 'tools/wptserve',
},
{
'source': buildconfig.topobjdir,
'base': '',
'pattern': 'mozinfo.json',
},
{
'source': buildconfig.topobjdir,
'base': 'dist/bin',
'patterns': [
'%s%s' % (f, buildconfig.substs['BIN_SUFFIX'])
for f in TEST_HARNESS_BINS
] + [
'%s%s%s' % (buildconfig.substs['DLL_PREFIX'], f, buildconfig.substs['DLL_SUFFIX'])
for f in TEST_HARNESS_DLLS
],
'dest': 'bin',
},
{
'source': buildconfig.topobjdir,
'base': 'dist/plugins',
'patterns': [
'%s%s%s' % (buildconfig.substs['DLL_PREFIX'], f, buildconfig.substs['DLL_SUFFIX'])
for f in TEST_PLUGIN_DLLS
],
'dest': 'bin/plugins',
},
{
'source': buildconfig.topobjdir,
'base': 'dist/plugins',
'patterns': TEST_PLUGIN_DIRS,
'dest': 'bin/plugins',
},
{
'source': buildconfig.topobjdir,
'base': 'dist/bin',
'patterns': GMP_TEST_PLUGIN_DIRS,
'dest': 'bin/plugins',
},
{
'source': buildconfig.topobjdir,
'base': 'dist/bin',
'patterns': [
'dmd.py',
'fix_linux_stack.py',
'fix_macosx_stack.py',
'fix_stack_using_bpsyms.py',
],
'dest': 'bin',
},
{
'source': buildconfig.topobjdir,
'base': 'dist/bin/components',
'patterns': [
'httpd.js',
'httpd.manifest',
'test_necko.xpt',
],
'dest': 'bin/components',
},
{
'source': buildconfig.topsrcdir,
'base': 'build/pgo/certs',
'pattern': '**',
'dest': 'certs',
}
],
'cppunittest': [
{
'source': STAGE,
'base': '',
'pattern': 'cppunittest/**',
},
# We don't ship these files if startup cache is disabled, which is
# rare. But it shouldn't matter for test archives.
{
'source': buildconfig.topsrcdir,
'base': 'startupcache/test',
'pattern': 'TestStartupCacheTelemetry.*',
'dest': 'cppunittest',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'runcppunittests.py',
'dest': 'cppunittest',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'remotecppunittests.py',
'dest': 'cppunittest',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'cppunittest.ini',
'dest': 'cppunittest',
},
{
'source': buildconfig.topobjdir,
'base': '',
'pattern': 'mozinfo.json',
'dest': 'cppunittest',
},
],
'gtest': [
{
'source': STAGE,
'base': '',
'pattern': 'gtest/**',
},
],
'mochitest': [
{
'source': buildconfig.topobjdir,
'base': '_tests/testing',
'pattern': 'mochitest/**',
},
{
'source': STAGE,
'base': '',
'pattern': 'mochitest/**',
},
{
'source': buildconfig.topobjdir,
'base': '',
'pattern': 'mozinfo.json',
'dest': 'mochitest'
}
],
'mozharness': [
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'mozharness/**',
},
],
'reftest': [
{
'source': buildconfig.topobjdir,
'base': '_tests',
'pattern': 'reftest/**',
},
{
'source': buildconfig.topobjdir,
'base': '',
'pattern': 'mozinfo.json',
'dest': 'reftest',
},
{
'source': buildconfig.topsrcdir,
'base': '',
'manifests': [
'layout/reftests/reftest.list',
'testing/crashtest/crashtests.list',
],
'dest': 'reftest/tests',
}
],
'talos': [
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'talos/**',
},
],
'web-platform': [
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'web-platform/meta/**',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'web-platform/mozilla/**',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing',
'pattern': 'web-platform/tests/**',
},
{
'source': buildconfig.topobjdir,
'base': '_tests',
'pattern': 'web-platform/**',
},
{
'source': buildconfig.topobjdir,
'base': '',
'pattern': 'mozinfo.json',
'dest': 'web-platform',
},
],
'xpcshell': [
{
'source': buildconfig.topobjdir,
'base': '_tests/xpcshell',
'pattern': '**',
'dest': 'xpcshell/tests',
},
{
'source': buildconfig.topsrcdir,
'base': 'testing/xpcshell',
'patterns': [
'head.js',
'mach_test_package_commands.py',
'moz-http2/**',
'moz-spdy/**',
'node-http2/**',
'node-spdy/**',
'remotexpcshelltests.py',
'runtestsb2g.py',
'runxpcshelltests.py',
'xpcshellcommandline.py',
],
'dest': 'xpcshell',
},
{
'source': STAGE,
'base': '',
'pattern': 'xpcshell/**',
},
{
'source': buildconfig.topobjdir,
'base': '',
'pattern': 'mozinfo.json',
'dest': 'xpcshell',
},
{
'source': buildconfig.topobjdir,
'base': 'build',
'pattern': 'automation.py',
'dest': 'xpcshell',
},
],
}
# "common" is our catch all archive and it ignores things from other archives.
# Verify nothing sneaks into ARCHIVE_FILES without a corresponding exclusion
# rule in the "common" archive.
#
# The ignore set does not depend on the loop variable, so build it once
# up front instead of re-deriving it for every archive key (the original
# rebuilt it on each iteration).
_common_ignores = set(itertools.chain(*(e.get('ignore', [])
                                        for e in ARCHIVE_FILES['common'])))
for k in ARCHIVE_FILES:
    # Skip mozharness because it isn't staged.
    if k in ('common', 'mozharness'):
        continue

    if not any(p.startswith('%s/' % k) for p in _common_ignores):
        raise Exception('"common" ignore list probably should contain %s' % k)
def find_files(archive):
    """Yield (archive_path, file) pairs for every file in *archive*.

    Entries in ARCHIVE_FILES describe where files live ('source'/'base'),
    which files to take ('pattern(s)'/'manifest(s)') and where they land in
    the archive (optional 'dest' prefix).
    """
    for entry in ARCHIVE_FILES[archive]:
        source = entry['source']
        dest = entry.get('dest')
        base = entry.get('base', '')

        # Bug fix: copy the lists out of the entry dict. The original
        # appended directly to entry['patterns'] / entry['manifests'],
        # mutating the module-level ARCHIVE_FILES data and accumulating
        # duplicate patterns if find_files() ran more than once.
        patterns = list(entry.get('patterns', []))
        pattern = entry.get('pattern')
        if pattern:
            patterns.append(pattern)

        manifests = list(entry.get('manifests', []))
        manifest = entry.get('manifest')
        if manifest:
            manifests.append(manifest)
        if manifests:
            dirs = find_manifest_dirs(buildconfig.topsrcdir, manifests)
            patterns.extend({'{}/**'.format(d) for d in dirs})

        # Always ignore editor/build droppings regardless of the entry.
        ignore = list(entry.get('ignore', []))
        ignore.extend([
            '**/.flake8',
            '**/.mkdir.done',
            '**/*.pyc',
        ])

        common_kwargs = {
            'find_executables': False,
            'find_dotfiles': True,
            'ignore': ignore,
        }

        finder = FileFinder(os.path.join(source, base), **common_kwargs)
        for pattern in patterns:
            for p, f in finder.find(pattern):
                if dest:
                    p = mozpath.join(dest, p)
                yield p, f
def find_manifest_dirs(topsrcdir, manifests):
    """Routine to retrieve directories specified in a manifest, relative to topsrcdir.

    It does not recurse into manifests, as we currently have no need for that.
    """
    dirs = set()

    for p in manifests:
        p = os.path.join(topsrcdir, p)

        # Format is dispatched on the file extension: .ini manifests go
        # through manifestparser, .list manifests through the reftest parser.
        if p.endswith('.ini'):
            test_manifest = TestManifest()
            test_manifest.read(p)
            dirs |= set([os.path.dirname(m) for m in test_manifest.manifests()])

        elif p.endswith('.list'):
            m = ReftestManifest()
            m.load(p)
            dirs |= m.dirs

        else:
            raise Exception('"{}" is not a supported manifest format.'.format(
                os.path.splitext(p)[1]))

    # Re-root the absolute directories as topsrcdir-relative paths.
    # NOTE(review): assumes every collected dir lives under topsrcdir.
    dirs = {mozpath.normpath(d[len(topsrcdir):]).lstrip('/') for d in dirs}

    # Filter out children captured by parent directories because duplicates
    # will confuse things later on.
    def parents(p):
        # Yield every ancestor path of p, nearest first.
        while True:
            p = mozpath.dirname(p)
            if not p:
                break
            yield p

    seen = set()
    # Shortest-first guarantees a parent is seen before any of its children.
    for d in sorted(dirs, key=len):
        if not any(p in seen for p in parents(d)):
            seen.add(d)

    return sorted(seen)
def main(argv):
    """CLI entry point: write the requested test archive to a zip file."""
    parser = argparse.ArgumentParser(description='Produce test archives')
    parser.add_argument('archive', help='Which archive to generate')
    parser.add_argument('outputfile', help='File to write output to')
    args = parser.parse_args(argv)

    if not args.outputfile.endswith('.zip'):
        raise Exception('expected zip output file')

    start = time.time()
    file_count = 0
    ensureParentDir(args.outputfile)
    with open(args.outputfile, 'wb') as fh:
        # Experimentation revealed that level 5 is significantly faster and has
        # marginally larger sizes than higher values and is the sweet spot
        # for optimal compression. Read the detailed commit message that
        # introduced this for raw numbers.
        with JarWriter(fileobj=fh, optimize=False, compress_level=5) as writer:
            for path, f in find_files(args.archive):
                writer.add(path.encode('utf-8'), f.read(), mode=f.mode,
                           skip_duplicates=True)
                file_count += 1

    elapsed = time.time() - start
    print('Wrote %d files in %d bytes to %s in %.2fs' % (
        file_count,
        os.path.getsize(args.outputfile),
        os.path.basename(args.outputfile),
        elapsed))
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main(sys.argv[1:]))
|
freifeld/cpabDiffeo | refs/heads/master | cpab/cpa2d/utils/HOLD_IT_get_stuff_for_the_local_version.py | 1 | #!/usr/bin/env python
"""
Created on Sun Nov 30 11:52:49 2014
Author: Oren Freifeld
Email: freifeld@csail.mit.edu
"""
import numpy as np
from scipy.linalg import inv
from scipy.sparse import linalg as ssl
from scipy.sparse import lil_matrix
from of.utils import Bunch
from of.utils import ipshell
def get_stuff_for_the_local_version(cpa_space, cells_verts):
    """Precompute per-cell data for the "local" parametrization of a
    triangular tessellation.

    Builds, for every cell, the homogeneous vertex matrix X and its inverse,
    the list of unique tessellation vertices, and the linear maps between
    per-cell affine parameters (Avees) and vertex velocities (velTess).

    Returns a Bunch with fields X, Xinv, vert_tess, ind_into_vert_tess,
    mat_velTess2Avees and mat_Avees2velTess, or None when cpa_space.tess
    is not 'tri'.
    """
    if cpa_space.tess != 'tri':
        # NOTE(review): the original also had
        #     raise ValueError(cpa_space.tess)
        # immediately after this return -- unreachable dead code. The
        # function is simply disabled for non-triangular tessellations.
        return None

    nC = cpa_space.nC
    nHomoCoo = cpa_space.nHomoCoo
    lengthAvee = cpa_space.lengthAvee
    dim_range = cpa_space.dim_range
    # Bug fix: dim_domain was used below but never defined, so the live
    # code path raised NameError. Presumably it comes from cpa_space like
    # the other dimensions read above -- TODO confirm against CpaSpace.
    dim_domain = cpa_space.dim_domain

    b = Bunch()
    X = np.zeros((nC, lengthAvee, lengthAvee))
    Xinv = np.zeros_like(X)

    if dim_domain == 1:
        raise NotImplementedError
    elif dim_domain == 2:
        # Each vertex (homogeneous coords) fills one row pair of x: rows
        # 2k hold the x-component equation and rows 2k+1 the y-component.
        for (x, xinv, (vrt0, vrt1, vrt2)) in zip(X, Xinv, cells_verts):
            x[0, :3] = x[1, 3:] = vrt0
            x[2, :3] = x[3, 3:] = vrt1
            x[4, :3] = x[5, 3:] = vrt2
            xinv[:] = inv(x)
    elif dim_domain == 3:
        raise NotImplementedError
    else:
        raise NotImplementedError

    # Collect the unique tessellation vertices, remembering for each cell
    # where its vertices live in that global list.
    vert_tess = []
    vert_tess_one_cell = []
    # np.int is a deprecated alias of the builtin int (removed in
    # NumPy >= 1.24); use int directly -- identical dtype.
    ind_into_vert_tess = np.zeros((nC, nHomoCoo), int)
    for c, cell_verts in enumerate(cells_verts):
        for j, v in enumerate(cell_verts):
            t = tuple(v.tolist())
            if t not in vert_tess:
                vert_tess.append(t)
                # c is the cell index
                # j is the index of this vertex within this cell
                vert_tess_one_cell.append((c, j))
            ind_into_vert_tess[c, j] = vert_tess.index(t)
    vert_tess = np.asarray(vert_tess)
    vert_tess_one_cell = np.asarray(vert_tess_one_cell)

    b.vert_tess = vert_tess
    b.ind_into_vert_tess = ind_into_vert_tess
    b.Xinv = Xinv
    b.X = X

    # Build a matrix H such that
    #     Avees = H times velTess
    # The values of H, which is sparse, are dictated by vert_tess.
    # H.shape = (lengthAvee*nC, len(vert_tess)*dim_range)
    H = np.zeros((lengthAvee * nC, len(vert_tess) * dim_range))
    for c in range(nC):
        ind = ind_into_vert_tess[c]
        ind_all_coo = np.zeros((len(ind), dim_range), int)
        for coo in range(dim_range):
            ind_all_coo[:, coo] = ind * dim_range + coo
        H[c * lengthAvee:(c + 1) * lengthAvee, ind_all_coo.ravel()] = Xinv[c]

    # Build a matrix G such that
    #     velTess = G times Avees
    # (the original comment said "H" here; it describes G)
    # G.shape = (len(vert_tess)*dim_range, lengthAvee*nC)
    G = np.zeros((len(vert_tess) * dim_range, lengthAvee * nC))
    for i in range(vert_tess.shape[0]):
        # c is the cell index
        # j is the index of this vertex within this cell
        c, j = vert_tess_one_cell[i]
        for coo in range(dim_range):
            G[i * dim_range + coo, lengthAvee * c:lengthAvee * (c + 1)] = X[c][j * dim_range + coo]

    b.mat_velTess2Avees = H
    b.mat_Avees2velTess = G

    if 1:
        # Wrap the dense maps as scipy LinearOperators so downstream code
        # can apply them without materializing products.
        def mv1(v):
            return H.dot(v)
        def mv2(v):
            return G.dot(v)
        def rmv1(v):
            return H.T.dot(v)
        def rmv2(v):
            return G.T.dot(v)
        def mm1(V):
            return H.dot(V)
        def mm2(V):
            return G.dot(V)
        _H = ssl.LinearOperator(lil_matrix(H).shape, matvec=mv1,
                                rmatvec=rmv1,
                                matmat=mm1)
        _G = ssl.LinearOperator(lil_matrix(G).shape, matvec=mv2,
                                rmatvec=rmv2,
                                matmat=mm2)
        b.mat_velTess2Avees = _H
        b.mat_Avees2velTess = _G

    return b
if __name__ == "__main__":
    # Import-only module; no standalone behavior.
    pass
|
ChrisBird/ardupilot | refs/heads/master | Tools/LogAnalyzer/VehicleType.py | 187 | class VehicleType():
Plane = 17
Copter = 23
Rover = 37
# these should really be "Plane", "Copter" and "Rover", but many
# things use these values as triggers in their code:
VehicleTypeString = {
17: "ArduPlane",
23: "ArduCopter",
37: "ArduRover"
}
|
xiaoshaozi52/ansible | refs/heads/devel | v1/ansible/runner/action_plugins/add_host.py | 107 | # Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible
from ansible.callbacks import vv
from ansible.errors import AnsibleError as ae
from ansible.runner.return_data import ReturnData
from ansible.utils import parse_kv, combine_vars
from ansible.inventory.host import Host
from ansible.inventory.group import Group
class ActionModule(object):
    ''' Create inventory hosts and groups in the memory inventory'''

    ### We need to be able to modify the inventory
    BYPASS_HOST_LOOP = True
    TRANSFERS_FILES = False

    def __init__(self, runner):
        # The runner carries the in-memory inventory this module mutates.
        self.runner = runner

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        """Add a host (and optionally groups) to the in-memory inventory.

        Returns a ReturnData whose result records 'new_host' and, when
        groups were given, 'new_groups'.
        """
        if self.runner.noop_on_check(inject):
            return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))

        args = {}
        if complex_args:
            args.update(complex_args)
        args.update(parse_kv(module_args))
        if not 'hostname' in args and not 'name' in args:
            raise ae("'name' is a required argument.")

        result = {}

        # Parse out any hostname:port patterns
        new_name = args.get('name', args.get('hostname', None))
        vv("creating host via 'add_host': hostname=%s" % new_name)

        if ":" in new_name:
            # Bug fix: split on the LAST colon only. The original
            # new_name.split(":") raised ValueError when the name
            # contained more than one colon.
            new_name, new_port = new_name.rsplit(":", 1)
            args['ansible_ssh_port'] = new_port

        # redefine inventory and get group "all"
        inventory = self.runner.inventory
        allgroup = inventory.get_group('all')

        # check if host in cache, add if not
        if new_name in inventory._hosts_cache:
            new_host = inventory._hosts_cache[new_name]
        else:
            new_host = Host(new_name)
            # only groups can be added directly to inventory
            inventory._hosts_cache[new_name] = new_host
            allgroup.add_host(new_host)

        groupnames = args.get('groupname', args.get('groups', args.get('group', '')))
        # add it to the group if that was specified
        if groupnames:
            for group_name in groupnames.split(","):
                group_name = group_name.strip()
                if not inventory.get_group(group_name):
                    new_group = Group(group_name)
                    inventory.add_group(new_group)
                    new_group.vars = inventory.get_group_variables(group_name, vault_password=inventory._vault_password)
                grp = inventory.get_group(group_name)
                grp.add_host(new_host)

                # add this host to the group cache
                if inventory._groups_list is not None:
                    if group_name in inventory._groups_list:
                        if new_host.name not in inventory._groups_list[group_name]:
                            inventory._groups_list[group_name].append(new_host.name)

                vv("added host to group via add_host module: %s" % group_name)
            result['new_groups'] = groupnames.split(",")

        # actually load host vars
        new_host.vars = combine_vars(new_host.vars, inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password))

        # Add any passed variables to the new_host
        for k in args.keys():
            if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
                new_host.set_variable(k, args[k])

        result['new_host'] = new_name

        # clear pattern caching completely since it's unpredictable what
        # patterns may have referenced the group
        inventory.clear_pattern_cache()

        return ReturnData(conn=conn, comm_ok=True, result=result)
|
jswope00/griffinx | refs/heads/master | openedx/core/djangoapps/user_api/tests/test_course_tag_api.py | 11 | """
Test the user course tag API.
"""
from django.test import TestCase
from student.tests.factories import UserFactory
from openedx.core.djangoapps.user_api.api import course_tag as course_tag_api
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class TestCourseTagAPI(TestCase):
    """
    Test the user course tag API (get/set of per-user, per-course tags).
    """
    def setUp(self):
        # Bug fix: chain to TestCase.setUp so base-class fixtures
        # (cleanups, client state) are initialized before ours.
        super(TestCourseTagAPI, self).setUp()
        self.user = UserFactory.create()
        self.course_id = SlashSeparatedCourseKey('test_org', 'test_course_number', 'test_run')
        self.test_key = 'test_key'

    def test_get_set_course_tag(self):
        """Round-trip a tag: missing -> set -> read -> overwrite -> read."""
        # get a tag that doesn't exist
        tag = course_tag_api.get_course_tag(self.user, self.course_id, self.test_key)
        self.assertIsNone(tag)

        # test setting a new key
        test_value = 'value'
        course_tag_api.set_course_tag(self.user, self.course_id, self.test_key, test_value)
        tag = course_tag_api.get_course_tag(self.user, self.course_id, self.test_key)
        self.assertEqual(tag, test_value)

        # test overwriting an existing key
        test_value = 'value2'
        course_tag_api.set_course_tag(self.user, self.course_id, self.test_key, test_value)
        tag = course_tag_api.get_course_tag(self.user, self.course_id, self.test_key)
        self.assertEqual(tag, test_value)
|
sorblack-x/prime-jack | refs/heads/master | chatterbotapi.py | 6 | import re
import sys
import hashlib
if sys.version_info >= (3, 0):
from urllib.request import build_opener, HTTPCookieProcessor, urlopen
from urllib.parse import urlencode
import http.cookiejar as cookielib
else:
from urllib import urlencode, urlopen
from urllib2 import build_opener, HTTPCookieProcessor
import cookielib
import uuid
import xml.dom.minidom
"""
chatterbotapi
Copyright (C) 2011 pierredavidbelanger@gmail.com
"""
#################################################
# API
#################################################
class ChatterBotType:
    # Enumeration of the supported chatbot backends; passed to
    # ChatterBotFactory.create() to pick an implementation.
    CLEVERBOT = 1
    JABBERWACKY = 2
    PANDORABOTS = 3
PANDORABOTS = 3
class ChatterBotFactory:
    """Builds a ChatterBot for the requested backend type."""

    def create(self, type, arg=None):
        # Pandorabots is the only backend that needs an extra argument
        # (the bot id); the two Cleverbot-style services differ only in
        # their URLs and checksum end index.
        if type == ChatterBotType.PANDORABOTS:
            if arg is None:
                raise Exception('PANDORABOTS needs a botid arg')
            return _Pandorabots(arg)
        if type == ChatterBotType.CLEVERBOT:
            return _Cleverbot('http://www.cleverbot.com',
                              'http://www.cleverbot.com/webservicemin', 35)
        if type == ChatterBotType.JABBERWACKY:
            return _Cleverbot('http://jabberwacky.com',
                              'http://jabberwacky.com/webservicemin', 29)
        return None
class ChatterBot:
    """Abstract bot; subclasses return a concrete session object."""
    def create_session(self):
        # Base implementation: no session available.
        return None
class ChatterBotSession:
    """One conversation; backends override think_thought()."""

    def think_thought(self, thought):
        # Base behavior is an echo; subclasses send the thought to a
        # remote service and return the reply.
        return thought

    def think(self, text):
        """Convenience wrapper: str in, str out."""
        query = ChatterBotThought()
        query.text = text
        reply = self.think_thought(query)
        return reply.text
class ChatterBotThought:
    # Plain data holder; a .text attribute is attached by callers.
    pass
#################################################
# Cleverbot impl
#################################################
class _Cleverbot(ChatterBot):
    """Cleverbot/Jabberwacky backend; the two differ only in URLs and
    in the end index used for the request checksum."""
    def __init__(self, baseUrl, serviceUrl, endIndex):
        self.baseUrl = baseUrl        # page fetched once to obtain cookies
        self.serviceUrl = serviceUrl  # endpoint POSTed on every exchange
        self.endIndex = endIndex      # slice bound for the icognocheck digest
    def create_session(self):
        return _CleverbotSession(self)
class _CleverbotSession(ChatterBotSession):
    """A conversation with the Cleverbot/Jabberwacky web service."""

    def __init__(self, bot):
        self.bot = bot
        # Form variables POSTed on every exchange; the service echoes
        # conversation state back, which is stored here between turns.
        self.vars = {}
        self.vars['start'] = 'y'
        self.vars['icognoid'] = 'wsf'
        self.vars['fno'] = '0'
        self.vars['sub'] = 'Say'
        self.vars['islearning'] = '1'
        self.vars['cleanslate'] = 'false'
        # Cookies from the landing page must be carried across requests.
        self.cookieJar = cookielib.CookieJar()
        self.opener = build_opener(HTTPCookieProcessor(self.cookieJar))
        self.opener.open(self.bot.baseUrl)

    def think_thought(self, thought):
        self.vars['stimulus'] = thought.text
        data = urlencode(self.vars)
        # 'icognocheck' is an md5 of a fixed slice of the POST body
        # (offset 9 up to bot.endIndex) -- presumably an anti-abuse token
        # required by the service; TODO confirm the slice bounds.
        data_to_digest = data[9:self.bot.endIndex]
        data_digest = hashlib.md5(data_to_digest.encode('utf-8')).hexdigest()
        data = data + '&icognocheck=' + data_digest
        url_response = self.opener.open(self.bot.serviceUrl, data.encode('utf-8'))
        response = str(url_response.read())
        # The reply is a carriage-return-separated record; each field is
        # stored back into self.vars so the next request carries the
        # conversation state forward.
        response_values = re.split(r'\\r|\r', response)
        #self.vars['??'] = _utils_string_at_index(response_values, 0)
        self.vars['sessionid'] = _utils_string_at_index(response_values, 1)
        self.vars['logurl'] = _utils_string_at_index(response_values, 2)
        self.vars['vText8'] = _utils_string_at_index(response_values, 3)
        self.vars['vText7'] = _utils_string_at_index(response_values, 4)
        self.vars['vText6'] = _utils_string_at_index(response_values, 5)
        self.vars['vText5'] = _utils_string_at_index(response_values, 6)
        self.vars['vText4'] = _utils_string_at_index(response_values, 7)
        self.vars['vText3'] = _utils_string_at_index(response_values, 8)
        self.vars['vText2'] = _utils_string_at_index(response_values, 9)
        self.vars['prevref'] = _utils_string_at_index(response_values, 10)
        #self.vars['??'] = _utils_string_at_index(response_values, 11)
        self.vars['emotionalhistory'] = _utils_string_at_index(response_values, 12)
        self.vars['ttsLocMP3'] = _utils_string_at_index(response_values, 13)
        self.vars['ttsLocTXT'] = _utils_string_at_index(response_values, 14)
        self.vars['ttsLocTXT3'] = _utils_string_at_index(response_values, 15)
        self.vars['ttsText'] = _utils_string_at_index(response_values, 16)
        self.vars['lineRef'] = _utils_string_at_index(response_values, 17)
        self.vars['lineURL'] = _utils_string_at_index(response_values, 18)
        self.vars['linePOST'] = _utils_string_at_index(response_values, 19)
        self.vars['lineChoices'] = _utils_string_at_index(response_values, 20)
        self.vars['lineChoicesAbbrev'] = _utils_string_at_index(response_values, 21)
        self.vars['typingData'] = _utils_string_at_index(response_values, 22)
        self.vars['divert'] = _utils_string_at_index(response_values, 23)
        # Field 16 (ttsText) doubles as the bot's textual reply.
        response_thought = ChatterBotThought()
        response_thought.text = _utils_string_at_index(response_values, 16)
        return response_thought
#################################################
# Pandorabots impl
#################################################
class _Pandorabots(ChatterBot):
    """Pandorabots backend; identified solely by a bot id."""
    def __init__(self, botid):
        self.botid = botid
    def create_session(self):
        return _PandorabotsSession(self)
class _PandorabotsSession(ChatterBotSession):
    """A conversation with the Pandorabots talk-xml web service."""

    def __init__(self, bot):
        self.vars = {}
        self.vars['botid'] = bot.botid
        # Random per-session customer id keeps conversations separate.
        self.vars['custid'] = uuid.uuid1()

    def think_thought(self, thought):
        """POST the thought and parse the <that> element of the XML reply.

        Always returns a ChatterBotThought; on an unparsable response its
        .text is ''. Bug fix: the original returned a bare '' string from
        the failure branches, which crashed ChatterBotSession.think()
        when it accessed .text on the result.
        """
        self.vars['input'] = thought.text
        data = urlencode(self.vars)
        url_response = urlopen('http://www.pandorabots.com/pandora/talk-xml', data)
        response = url_response.read()
        response_dom = xml.dom.minidom.parseString(response)

        response_thought = ChatterBotThought()
        response_thought.text = ''

        that_elements = response_dom.getElementsByTagName('that')
        if that_elements and that_elements[0] is not None:
            children = that_elements[0].childNodes
            if children and children[0] is not None:
                reply = children[0].data
                if reply is not None:
                    response_thought.text = reply.strip()
        return response_thought
#################################################
# Utils
#################################################
def _utils_string_at_index(strings, index):
    """Return strings[index], or '' when index is past the end."""
    return strings[index] if index < len(strings) else ''
|
jbteixeir/Openflow-DC-Framework | refs/heads/master | pox/topology/topology.py | 31 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
The Topology module is the root of an object model composed of entities
like switches, hosts, links, etc. This object model is populated by other
modules. For example, openflow.topology populates the topology object
with OpenFlow switches.
Note that this means that you often want to invoke something like:
$ ./pox.py topology openflow.discovery openflow.topology
"""
from pox.lib.revent import *
from pox.core import core
from pox.lib.addresses import *
import traceback
import pickle
class EntityEvent (Event):
  """Base class for events that concern a single topology entity."""
  def __init__ (self, entity):
    Event.__init__(self)
    self.entity = entity  # the Entity this event is about

class EntityJoin (EntityEvent):
  """
  An entity has been added.

  Note that if there is a more specific join event defined for a particular
  entity, (e.g., SwitchJoin), this event will not be fired.

  TODO: or we could always raise EntityJoins along with SwitchJoins, which
  seems more intuitive to me.
  """
  pass

class EntityLeave (EntityEvent):
  """
  An entity has been removed

  Note that if there is a more specific leave event defined for a particular
  entity, (e.g., SwitchLeave), this event will not be fired.

  TODO: or we could always raise EntityLeaves along with SwitchLeaves, which
  seems more intuitive to me.
  """
  pass
# Switch-specific events. Join/Leave describe long-term membership;
# ConnectionUp/Down describe the (shorter-lived) control channel.
class SwitchEvent (EntityEvent): pass

class SwitchJoin (SwitchEvent):
  """
  As opposed to ConnectionUp, SwitchJoin occurs over large time scales
  (e.g. an administrator physically moving a switch).
  """
  def __init__ (self, switch):
    SwitchEvent.__init__(self, switch)
    self.switch = switch

class SwitchLeave (SwitchEvent):
  """
  As opposed to ConnectionDown, SwitchLeave occurs over large time scales
  (e.g. an administrator physically moving a switch).
  """
  pass

class SwitchConnectionUp(SwitchEvent):
  def __init__(self, switch, connection):
    SwitchEvent.__init__(self, switch)
    self.switch = switch
    self.connection = connection  # the control connection that came up

class SwitchConnectionDown(SwitchEvent): pass

# Host-specific membership events.
class HostEvent (EntityEvent): pass
class HostJoin (HostEvent): pass
class HostLeave (HostEvent): pass

class Update (Event):
  """
  Fired by Topology whenever anything has changed
  """
  def __init__ (self, event=None):
    Event.__init__(self)
    self.event = event  # the more specific event that triggered this Update
class Entity (object):
  """
  Note that the Entity class is intentionally simple; It only serves as a
  convenient SuperClass type.

  It's up to subclasses to implement specific functionality (e.g.
  OpenFlow1.0 switch functionality). The purpose of this design decision
  is to prevent protocol specific details from being leaked into this
  module... but this design decision does /not/ imply that pox.toplogy
  serves to define a generic interface to abstract entity types.

  NOTE: /all/ subclasses must call this superconstructor, since
  the unique self.id is field is used by Topology
  """

  # This is a counter used so that we can get unique IDs for entities.
  # Some entities don't need this because they have more meaningful
  # identifiers.
  _next_id = 101
  _all_ids = set()
  _tb = {}

  def __init__ (self, id=None):
    # NOTE(review): truthiness test means an explicit id of 0 would be
    # treated as "no id" and auto-assigned; kept as-is since callers
    # pass DPIDs/names, but worth confirming.
    if id:
      if id in Entity._all_ids:
        # Show where the conflicting id was first registered, then fail.
        print("".join(traceback.format_list(self._tb[id])))
        raise Exception("ID %s already taken" % str(id))
    else:
      while Entity._next_id in Entity._all_ids:
        Entity._next_id += 1
      id = Entity._next_id

    self._tb[id] = traceback.extract_stack()
    Entity._all_ids.add(id)
    self.id = id

  def serialize(self):
    """Return this entity pickled with protocol 0."""
    return pickle.dumps(self, protocol = 0)

  @classmethod
  def deserialize(cls, data):
    """Unpickle and return an entity produced by serialize().

    Bug fix: the original body was `pickle.loads(cls, protocol = 0)`,
    which passed the class object instead of the pickled bytes and used
    a `protocol` keyword that pickle.loads() does not accept -- it
    raised on every call. The (previously unusable) method now takes
    the pickled data explicitly.
    """
    return pickle.loads(data)
class Host (Entity):
  """
  A generic Host entity.
  """
  def __init__(self,id=None):
    Entity.__init__(self, id)

class Switch (Entity):
  """
  Subclassed by protocol-specific switch classes,
  e.g. pox.openflow.topology.OpenFlowSwitch
  """
  def __init__(self, id=None):
    # Switches often have something more meaningful to use as an ID
    # (e.g., a DPID or MAC address), so they take it as a parameter.
    Entity.__init__(self, id)

class Port (Entity):
  """A single switch port: number, hardware address, and name."""
  def __init__ (self, num, hwAddr, name):
    Entity.__init__(self)
    self.number = num
    self.hwAddr = EthAddr(hwAddr)  # normalized to an EthAddr instance
    self.name = name

class Controller (Entity):
  """A controller known to the topology, keyed by its name."""
  def __init__(self, name, handshake_complete=False):
    # NOTE(review): sets self.id directly without calling
    # Entity.__init__, so this id is never registered in
    # Entity._all_ids -- confirm that is intentional.
    self.id = name
    # TODO: python aliases?
    self.name = name
    self.handshake_complete = handshake_complete

  def handshake_completed(self):
    self.handshake_complete = True
class Topology (EventMixin):
_eventMixin_events = [
SwitchJoin,
SwitchLeave,
HostJoin,
HostLeave,
EntityJoin,
EntityLeave,
Update
]
_core_name = "topology" # We want to be core.topology
def __init__ (self, name="topology"):
EventMixin.__init__(self)
self._entities = {}
self.name = name
self.log = core.getLogger(name)
# If a client registers a handler for these events after they have
# already occurred, we promise to re-issue them to the newly joined
# client.
self._event_promises = {
SwitchJoin : self._fulfill_SwitchJoin_promise
}
def getEntityByID (self, ID, fail=False):
"""
Raises an exception if fail is True and the entity doesn't exist
See also: The 'entity' property.
"""
if fail:
return self._entities[ID]
else:
return self._entities.get(ID, None)
def removeEntity (self, entity):
del self._entities[entity.id]
self.log.info(str(entity) + " left")
if isinstance(entity, Switch):
self.raiseEvent(SwitchLeave, entity)
elif isinstance(entity, Host):
self.raiseEvent(HostLeave, entity)
else:
self.raiseEvent(EntityLeave, entity)
def addEntity (self, entity):
""" Will raise an exception if entity.id already exists """
if entity.id in self._entities:
raise RuntimeError("Entity exists")
self._entities[entity.id] = entity
self.log.debug(str(entity) + " (id: " + str(entity.id) + ") joined")
if isinstance(entity, Switch):
self.raiseEvent(SwitchJoin, entity)
elif isinstance(entity, Host):
self.raiseEvent(HostJoin, entity)
else:
self.raiseEvent(EntityJoin, entity)
def getEntitiesOfType (self, t=Entity, subtypes=True):
if subtypes is False:
return [x for x in self._entities.itervalues() if type(x) is t]
else:
return [x for x in self._entities.itervalues() if isinstance(x, t)]
def addListener(self, eventType, handler, once=False, weak=False,
priority=None, byName=False):
"""
We interpose on EventMixin.addListener to check if the eventType is
in our promise list. If so, trigger the handler for all previously
triggered events.
"""
if eventType in self._event_promises:
self._event_promises[eventType](handler)
return EventMixin.addListener(self, eventType, handler, once=once,
weak=weak, priority=priority,
byName=byName)
def raiseEvent (self, event, *args, **kw):
"""
Whenever we raise any event, we also raise an Update, so we extend
the implementation in EventMixin.
"""
rv = EventMixin.raiseEvent(self, event, *args, **kw)
if type(event) is not Update:
EventMixin.raiseEvent(self, Update(event))
return rv
def serialize (self):
"""
Picklize our current entities.
Returns a hash: { id -> pickled entitiy }
"""
id2entity = {}
for id in self._entities:
entity = self._entities[id]
id2entity[id] = entity.serialize()
return id2entity
  def deserializeAndMerge (self, id2entity):
    """
    Given the output of topology.serialize(), deserialize each entity, and:
    - insert a new Entry if it didn't already exist here, or
    - update a pre-existing entry if it already existed
    """
    # NOTE(review): pickle.loads on externally-supplied data is unsafe if the
    # peer is untrusted — confirm this only ever receives data from trusted
    # controllers.
    for entity_id in id2entity.keys():
      # Keys/values presumably arrive as unicode; encode back to byte strings
      # before unpickling / id comparison.
      pickled_entity = id2entity[entity_id].encode('ascii', 'ignore')
      entity = pickle.loads(pickled_entity)
      entity.id = entity_id.encode('ascii', 'ignore')
      try:
        # Try to parse it as an int
        entity.id = int(entity.id)
      except ValueError:
        pass
      existing_entity = self.getEntityByID(entity.id)
      if existing_entity:
        self.log.debug("New metadata for %s: %s " % (str(existing_entity), str(entity)))
        # TODO: define an Entity.merge method (need to do something about his update!)
      else:
        self.addEntity(entity)
def _fulfill_SwitchJoin_promise(self, handler):
""" Trigger the SwitchJoin handler for all pre-existing switches """
for switch in self.getEntitiesOfType(Switch, True):
handler(SwitchJoin(switch))
  def __len__(self):
    # Number of entities currently tracked.
    return len(self._entities)
  def __str__(self):
    # TODO: display me graphically
    # One header line with the total count, then an "<id> <entity>" line
    # per entity.
    strings = []
    strings.append("topology (%d total entities)" % len(self._entities))
    for id,entity in self._entities.iteritems():
      strings.append("%s %s" % (str(id), str(entity)))
    return '\n'.join(strings)
|
komsas/OpenUpgrade | refs/heads/master | addons/website_mail_group/__init__.py | 1350 | import controllers
|
AlexPereverzyev/spidy | refs/heads/master | spidy/document/xpath.py | 1 |
''' XPath expression parser. Please see Spidy documentation for what's supported. '''
__all__ = ['XPath']
from selectors import *
from spidy.common import *
from spidy.common.sniff import is_int
XP_STRING = ['\'', '"']
XP_UNDEFINED = 0
XP_SEEK_NAME = 1
XP_SEEK_INDEX = 2
XP_SEEK_ATTR = 4
XP_SEEK_ATTR_EXIST = 8
XP_SEEK_ATTR_MATCH = 16
XP_SEEK_CLOSURE = 32
XP_SEEK_SEARCH = 64
XP_READ_STRING = 128
XP_START_NEW = 256
class XPath(object):
''' XPath helper class, contains commonly used XPath parsing and evaluation
routines. '''
_id = None
_sline = None
_exp_string = None
_paths = None
    def __init__(self, id, sline, exp_string):
        # id: owning script identifier, sline: script line (both are used for
        # error reporting); exp_string is parsed eagerly into self._paths.
        self._id = id
        self._sline = sline
        self._exp_string = exp_string
        self._paths = []
        self._parse()
def is_empty(self):
''' Indicates that XPath expression has no selectors specified. '''
return len(self._paths) == 0
def get_paths(self):
''' Returns parsed path segments. '''
return self._paths
    def apply(self, tags, cursor):
        ''' Applies XPath expression to supplied tag tree and returns value. '''
        if tags == None: return ''
        cur_tags = None
        value = None
        last = None
        # Alternative paths (joined by '|') are tried in order; the first one
        # that yields a non-None value wins.
        for path in self._paths:
            # init, dont select any tags, if accessing current one
            last = path[-1]
            if not last.is_current():
                if cursor != None: cur_tags = cursor.get_children()
                else: cur_tags = tags
            # apply selectors, ignore 'current tag' ones
            for segment in [s for s in path if not s.is_current()]:
                for s in segment.get_selectors():
                    cur_tags = s.filter(cur_tags)
                # Descend one level between segments (but not after the last).
                if segment != last:
                    cur_tags = [c for t in cur_tags for c in t.get_children()]
            value = self._extract_value(cursor, cur_tags, last)
            if value != None:
                break
            cur_tags = None
        # never return None
        if value == None:
            msg = 'XPath: couldn\'t resolve path when applying: {0}, line {1}'.format(self._exp_string, self._sline.number+1)
            log.warning(self._id, msg)
            if last == None or last.is_single():
                value = ''
            elif last != None:
                value = []
        return value
    def skip(self, tags, cursor, reverse = False):
        ''' Skips tag tree to specified by XPath expression tag element and
        returns it. '''
        if tags == None: return None
        cur_tags = None
        for path in self._paths:
            # init, dont select any tags, if accessing current one
            last = path[-1]
            if not reverse and not last.is_current():
                if cursor != None: cur_tags = cursor.get_children()
                else: cur_tags = tags
            else:
                # reverse mode (or a current-tag path) starts from the cursor
                cur_tags = [cursor]
            for segment in [s for s in path if not s.is_current()]:
                if not reverse:
                    # forward: filter, then descend between segments
                    for s in segment.get_selectors():
                        cur_tags = s.filter(cur_tags)
                    if segment != last:
                        cur_tags = [c for t in cur_tags for c in t.get_children()]
                    if cur_tags == None or len(cur_tags) == 0:
                        cur_tags = None
                        break
                else:
                    # reverse: filter, then climb to the parent of the first match
                    if cursor == None:
                        break
                    for s in segment.get_selectors():
                        cur_tags = s.filter(cur_tags)
                    if cur_tags == None or len(cur_tags) == 0 or cur_tags[0].get_parent() == None:
                        cur_tags = None
                        break
                    else:
                        cur_tags = [cur_tags[0].get_parent()]
        if cur_tags != None and len(cur_tags) > 0:
            return cur_tags[0]
        else:
            msg = 'XPath: couldn\'t resolve path when skipping: {0}, line {1}'.format(self._exp_string, self._sline.number+1)
            log.warning(self._id, msg)
            return None
def _parse(self):
''' Parses XPath expression and returns list of paths segments ready for
evaluation (each path is composed of segments). Implemented as
deterministic state machine.
Note: thou its seems natural to reduce the result to plain list of
selectors, validation can get very kinky w/o segments which
carry current state. '''
path = []
cur = ''
pi = None
s = XP_SEEK_NAME
str_sep = None
i = -1
for c in self._exp_string:
i += 1
# reading strings inside of path expressions (next three ifs)
if c in XP_STRING and not s & XP_READ_STRING:
validate(self._id, self._sline, s == XP_SEEK_CLOSURE, 'XPath: invalid syntax')
s |= XP_READ_STRING
str_sep = c
elif c == str_sep and s & XP_READ_STRING and (i < 1 or self._exp_string[i-1] != '\\'):
validate(self._id, self._sline, s == (XP_READ_STRING | XP_SEEK_CLOSURE), 'XPath: invalid syntax')
s ^= XP_READ_STRING
str_sep = None
elif s & XP_READ_STRING:
cur += c
elif c == '|':
validate(self._id, self._sline, not s & XP_START_NEW, 'XPath: invalid syntax')
validate(self._id, self._sline, cur != '' or not s & XP_SEEK_NAME, 'XPath: tag name should be specified')
# finish current, validate current path is not empty, start new path
if pi == None and cur != '': pi = Segment()
if pi != None:
self._complete_path(path, pi, cur, s)
self._paths.append(path)
path = []
cur = ''
pi = None
s = XP_SEEK_NAME | XP_START_NEW
elif c == '/':
# finish current, start new segment
if pi == None and cur != '': pi = Segment()
if pi != None and not s & XP_SEEK_SEARCH:
self._complete_path(path, pi, cur, s)
if not s & XP_SEEK_SEARCH:
pi = Segment()
s = XP_SEEK_NAME | XP_SEEK_SEARCH
else:
pi.add_selector(FlattenSelector())
s = XP_SEEK_NAME
cur = ''
elif c == '[':
validate(self._id, self._sline, cur != '' or not s & XP_SEEK_NAME, 'XPath: tag name should be specified')
validate(self._id, self._sline, s & (XP_SEEK_NAME | XP_SEEK_ATTR | XP_SEEK_CLOSURE), 'XPath: invalid syntax')
if s & XP_SEEK_NAME:
validate(self._id, self._sline, cur != '', 'XPath: invalid syntax')
if pi == None: pi = Segment()
pi.add_selector(NameSelector(cur))
elif s & XP_SEEK_ATTR and cur.strip() != '':
pi.set_attr(cur)
s = XP_UNDEFINED
if not pi.has_index():
s |= XP_SEEK_INDEX
if not pi.has_attr_val():
s |= XP_SEEK_ATTR_EXIST
cur = ''
elif c == ']':
validate(self._id, self._sline, cur != '' or s & XP_SEEK_CLOSURE, 'XPath: index or attribute must be specified')
validate(self._id, self._sline, s & XP_SEEK_INDEX and is_int(cur) or s & (XP_SEEK_ATTR_MATCH | XP_SEEK_CLOSURE), 'XPath: invalid syntax')
if s & XP_SEEK_INDEX:
pi.add_selector(IndexSelector(int(cur)))
s = XP_UNDEFINED
if not pi.has_attr():
s |= XP_SEEK_ATTR
elif s & XP_SEEK_ATTR_MATCH:
pi.add_selector(AttributeValueSelector(cur))
s = XP_UNDEFINED
if not pi.has_index():
s |= XP_SEEK_INDEX
if not pi.has_attr():
s |= XP_SEEK_ATTR
elif s & XP_SEEK_CLOSURE:
pi.get_selectors()[-1].set_value(cur)
s = XP_UNDEFINED
if not pi.has_index():
s |= XP_SEEK_INDEX
if not pi.has_attr():
s |= XP_SEEK_ATTR
cur = ''
elif c == '@':
validate(self._id, self._sline, s & (XP_SEEK_NAME | XP_SEEK_ATTR | XP_SEEK_ATTR_EXIST), 'XPath: invalid syntax')
if s & XP_SEEK_NAME:
validate(self._id, self._sline, cur != '', 'XPath: invalid syntax')
if pi == None: pi = Segment()
pi.add_selector(NameSelector(cur))
else:
validate(self._id, self._sline, cur == '', 'XPath: invalid syntax')
if s & XP_SEEK_ATTR_EXIST:
s = XP_SEEK_ATTR_MATCH
else:
s = XP_SEEK_ATTR
cur = ''
elif c == '=':
validate(self._id, self._sline, cur != '' and s & XP_SEEK_ATTR_MATCH, 'XPath: invalid syntax')
pi.add_selector(AttributeValueSelector(cur))
s = XP_SEEK_CLOSURE
cur = ''
elif c != ' ' and c != '\t':
# dont allow current tag references in search or double wildcards
validate(self._id, self._sline,
not (c == '.' and pi != None and pi.has_search())
and not ('.' in cur or '*' in cur), 'XPath: invalid syntax')
s &= ~XP_SEEK_SEARCH
cur += c
# if path starts from word char (not slash) - add implicit search
if pi == None and len(path) == 0 and c != '.':
pi = Segment()
pi.add_selector(FlattenSelector())
if cur != '' or not s & XP_SEEK_NAME:
if pi == None: pi = Segment()
self._complete_path(path, pi, cur, s)
if len(path) > 0:
self._paths.append(path)
validate(self._id, self._sline, not s & XP_START_NEW, 'XPath: invalid syntax')
validate(self._id, self._sline, len(self._paths) > 0, 'XPath: path expression should be specified')
for p in self._paths:
if len(p) > 0:
for pi in p[:-1]:
validate(self._id, self._sline, pi.get_attr() == None, 'XPath: only last path segment can specify attribute selector')
cs = 0
for pi in p:
cs += pi.is_current()
validate(self._id, self._sline, cs <= 1, 'XPath: only one current tag selector is allowed')
def _complete_path(self, path, segment, cur, state):
c = cur.strip()
if state & XP_SEEK_NAME:
segment.add_selector(NameSelector(c))
elif state & XP_SEEK_ATTR and c != '':
segment.set_attr(c)
elif c != '':
validate(id, sline, not state & XP_SEEK_INDEX, 'XPath: invalid syntax')
path.append(segment)
def _extract_value(self, cursor, tags, segment):
value = None
if segment.is_single():
# extract single value
tag = None
if tags != None and len(tags) > 0:
tag = tags[0]
else:
# accessing current tag
tag = cursor
if tag != None:
attr = segment.get_attr()
if attr != None:
if tag.get_attrs().has_key(attr):
value = tag.get_attrs()[attr]
else:
value = tag.get_value()
if value != None and isinstance(value, basestring):
value = value.strip()
else:
# extract list of values
if tags != None and len(tags) > 0:
value = []
for t in tags:
v = None
attr = segment.get_attr()
if attr != None:
if t.get_attrs().has_key(attr):
v = t.get_attrs()[attr]
else:
v = t.get_value()
if v != None and isinstance(value, basestring):
v = v.strip()
elif value == None:
v = ''
value.append(v)
return value |
tarunkapadia93/gk_a6k | refs/heads/gk-r2 | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command line: optional [comm] filter and/or [interval] in seconds.
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        # A single argument is the interval if numeric, otherwise a comm filter.
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
# syscall id -> hit count since the last display refresh
syscalls = autodict()
def trace_begin():
    # Run the periodic display on a background thread; perf delivers the
    # event callbacks on the main thread.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Invoked by perf for every syscall entry; counts hits per syscall id,
    # optionally restricted to one command name.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # autodict hands back a fresh non-int node for an unseen id; start at 1
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Background-thread loop: clear the terminal, print the per-syscall
    # counts sorted by frequency, reset the counters, sleep, repeat.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # id may not map to a known syscall name yet; skip the row
                pass
        syscalls.clear()
        time.sleep(interval)
|
lulandco/SickRage | refs/heads/develop | lib/unidecode/x07d.py | 253 | data = (
'Ji ', # 0x00
'Cha ', # 0x01
'Zhou ', # 0x02
'Xun ', # 0x03
'Yue ', # 0x04
'Hong ', # 0x05
'Yu ', # 0x06
'He ', # 0x07
'Wan ', # 0x08
'Ren ', # 0x09
'Wen ', # 0x0a
'Wen ', # 0x0b
'Qiu ', # 0x0c
'Na ', # 0x0d
'Zi ', # 0x0e
'Tou ', # 0x0f
'Niu ', # 0x10
'Fou ', # 0x11
'Jie ', # 0x12
'Shu ', # 0x13
'Chun ', # 0x14
'Pi ', # 0x15
'Yin ', # 0x16
'Sha ', # 0x17
'Hong ', # 0x18
'Zhi ', # 0x19
'Ji ', # 0x1a
'Fen ', # 0x1b
'Yun ', # 0x1c
'Ren ', # 0x1d
'Dan ', # 0x1e
'Jin ', # 0x1f
'Su ', # 0x20
'Fang ', # 0x21
'Suo ', # 0x22
'Cui ', # 0x23
'Jiu ', # 0x24
'Zha ', # 0x25
'Kinu ', # 0x26
'Jin ', # 0x27
'Fu ', # 0x28
'Zhi ', # 0x29
'Ci ', # 0x2a
'Zi ', # 0x2b
'Chou ', # 0x2c
'Hong ', # 0x2d
'Zha ', # 0x2e
'Lei ', # 0x2f
'Xi ', # 0x30
'Fu ', # 0x31
'Xie ', # 0x32
'Shen ', # 0x33
'Bei ', # 0x34
'Zhu ', # 0x35
'Qu ', # 0x36
'Ling ', # 0x37
'Zhu ', # 0x38
'Shao ', # 0x39
'Gan ', # 0x3a
'Yang ', # 0x3b
'Fu ', # 0x3c
'Tuo ', # 0x3d
'Zhen ', # 0x3e
'Dai ', # 0x3f
'Zhuo ', # 0x40
'Shi ', # 0x41
'Zhong ', # 0x42
'Xian ', # 0x43
'Zu ', # 0x44
'Jiong ', # 0x45
'Ban ', # 0x46
'Ju ', # 0x47
'Mo ', # 0x48
'Shu ', # 0x49
'Zui ', # 0x4a
'Wata ', # 0x4b
'Jing ', # 0x4c
'Ren ', # 0x4d
'Heng ', # 0x4e
'Xie ', # 0x4f
'Jie ', # 0x50
'Zhu ', # 0x51
'Chou ', # 0x52
'Gua ', # 0x53
'Bai ', # 0x54
'Jue ', # 0x55
'Kuang ', # 0x56
'Hu ', # 0x57
'Ci ', # 0x58
'Geng ', # 0x59
'Geng ', # 0x5a
'Tao ', # 0x5b
'Xie ', # 0x5c
'Ku ', # 0x5d
'Jiao ', # 0x5e
'Quan ', # 0x5f
'Gai ', # 0x60
'Luo ', # 0x61
'Xuan ', # 0x62
'Bing ', # 0x63
'Xian ', # 0x64
'Fu ', # 0x65
'Gei ', # 0x66
'Tong ', # 0x67
'Rong ', # 0x68
'Tiao ', # 0x69
'Yin ', # 0x6a
'Lei ', # 0x6b
'Xie ', # 0x6c
'Quan ', # 0x6d
'Xu ', # 0x6e
'Lun ', # 0x6f
'Die ', # 0x70
'Tong ', # 0x71
'Si ', # 0x72
'Jiang ', # 0x73
'Xiang ', # 0x74
'Hui ', # 0x75
'Jue ', # 0x76
'Zhi ', # 0x77
'Jian ', # 0x78
'Juan ', # 0x79
'Chi ', # 0x7a
'Mian ', # 0x7b
'Zhen ', # 0x7c
'Lu ', # 0x7d
'Cheng ', # 0x7e
'Qiu ', # 0x7f
'Shu ', # 0x80
'Bang ', # 0x81
'Tong ', # 0x82
'Xiao ', # 0x83
'Wan ', # 0x84
'Qin ', # 0x85
'Geng ', # 0x86
'Xiu ', # 0x87
'Ti ', # 0x88
'Xiu ', # 0x89
'Xie ', # 0x8a
'Hong ', # 0x8b
'Xi ', # 0x8c
'Fu ', # 0x8d
'Ting ', # 0x8e
'Sui ', # 0x8f
'Dui ', # 0x90
'Kun ', # 0x91
'Fu ', # 0x92
'Jing ', # 0x93
'Hu ', # 0x94
'Zhi ', # 0x95
'Yan ', # 0x96
'Jiong ', # 0x97
'Feng ', # 0x98
'Ji ', # 0x99
'Sok ', # 0x9a
'Kase ', # 0x9b
'Zong ', # 0x9c
'Lin ', # 0x9d
'Duo ', # 0x9e
'Li ', # 0x9f
'Lu ', # 0xa0
'Liang ', # 0xa1
'Chou ', # 0xa2
'Quan ', # 0xa3
'Shao ', # 0xa4
'Qi ', # 0xa5
'Qi ', # 0xa6
'Zhun ', # 0xa7
'Qi ', # 0xa8
'Wan ', # 0xa9
'Qian ', # 0xaa
'Xian ', # 0xab
'Shou ', # 0xac
'Wei ', # 0xad
'Qi ', # 0xae
'Tao ', # 0xaf
'Wan ', # 0xb0
'Gang ', # 0xb1
'Wang ', # 0xb2
'Beng ', # 0xb3
'Zhui ', # 0xb4
'Cai ', # 0xb5
'Guo ', # 0xb6
'Cui ', # 0xb7
'Lun ', # 0xb8
'Liu ', # 0xb9
'Qi ', # 0xba
'Zhan ', # 0xbb
'Bei ', # 0xbc
'Chuo ', # 0xbd
'Ling ', # 0xbe
'Mian ', # 0xbf
'Qi ', # 0xc0
'Qie ', # 0xc1
'Tan ', # 0xc2
'Zong ', # 0xc3
'Gun ', # 0xc4
'Zou ', # 0xc5
'Yi ', # 0xc6
'Zi ', # 0xc7
'Xing ', # 0xc8
'Liang ', # 0xc9
'Jin ', # 0xca
'Fei ', # 0xcb
'Rui ', # 0xcc
'Min ', # 0xcd
'Yu ', # 0xce
'Zong ', # 0xcf
'Fan ', # 0xd0
'Lu ', # 0xd1
'Xu ', # 0xd2
'Yingl ', # 0xd3
'Zhang ', # 0xd4
'Kasuri ', # 0xd5
'Xu ', # 0xd6
'Xiang ', # 0xd7
'Jian ', # 0xd8
'Ke ', # 0xd9
'Xian ', # 0xda
'Ruan ', # 0xdb
'Mian ', # 0xdc
'Qi ', # 0xdd
'Duan ', # 0xde
'Zhong ', # 0xdf
'Di ', # 0xe0
'Min ', # 0xe1
'Miao ', # 0xe2
'Yuan ', # 0xe3
'Xie ', # 0xe4
'Bao ', # 0xe5
'Si ', # 0xe6
'Qiu ', # 0xe7
'Bian ', # 0xe8
'Huan ', # 0xe9
'Geng ', # 0xea
'Cong ', # 0xeb
'Mian ', # 0xec
'Wei ', # 0xed
'Fu ', # 0xee
'Wei ', # 0xef
'Yu ', # 0xf0
'Gou ', # 0xf1
'Miao ', # 0xf2
'Xie ', # 0xf3
'Lian ', # 0xf4
'Zong ', # 0xf5
'Bian ', # 0xf6
'Yun ', # 0xf7
'Yin ', # 0xf8
'Ti ', # 0xf9
'Gua ', # 0xfa
'Zhi ', # 0xfb
'Yun ', # 0xfc
'Cheng ', # 0xfd
'Chan ', # 0xfe
'Dai ', # 0xff
)
|
Looney4444/Facial-Detection | refs/heads/master | src/main.py | 1 | # AJ Looney
# 9/6/15
# FacialDetection
from videoCapture import VideoCapture
from detectFace import DetectFace
def main():
    """Open the default camera, run face detection (smiles on, eyes off),
    then release the capture device."""
    capture = VideoCapture()
    detector = DetectFace(capture, smile=True, eye=False)
    detector.activate()
    capture.release()
if __name__ == '__main__':
main()
|
BorgERP/borg-erp-6of3 | refs/heads/master | addons/account_analytic_plans/report/__init__.py | 445 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
supergis/micropython | refs/heads/master | tests/basics/generator_return.py | 99 | def gen():
yield 1
return 42
g = gen()
print(next(g))
try:
    print(next(g))
except StopIteration as e:
    # Expected: StopIteration(42) — the generator's return value rides on
    # the exception instance.
    print(repr(e))
akretion/odoo | refs/heads/12-patch-paging-100-in-o2m | addons/web_editor/models/ir_http.py | 25 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
from odoo.http import request
from odoo.osv import expression
class IrHttp(models.AbstractModel):
    _inherit = 'ir.http'

    @classmethod
    def _dispatch(cls):
        """Copy editor-related query-string flags into the request context
        before dispatching."""
        context = dict(request.context)
        args = request.httprequest.args
        for flag in ('editable', 'edit_translations'):
            if flag in args and flag not in context:
                context[flag] = True
        # Translation editing implies translatable rendering.
        if context.get('edit_translations') and 'translatable' not in context:
            context['translatable'] = True
        request.context = context
        return super(IrHttp, cls)._dispatch()

    @classmethod
    def _get_translation_frontend_modules_domain(cls):
        """Also expose web_editor's translations to the frontend."""
        base_domain = super(IrHttp, cls)._get_translation_frontend_modules_domain()
        return expression.OR([base_domain, [('name', '=', 'web_editor')]])
|
mSenyor/kivy | refs/heads/master | kivy/tests/test_issue_1091.py | 55 | import unittest
from kivy.uix.stacklayout import StackLayout
from kivy.uix.widget import Widget
class PaddingSpacingTestCase(unittest.TestCase):

    def test_tb_lr_stacklayout(self):
        """The gap between a tb-lr StackLayout's top edge and its first
        child's top must equal the padding (20)."""
        layout = StackLayout(
            orientation='tb-lr',
            size=(200, 200),
            padding=20,
            spacing=10)
        child = Widget(width=100, size_hint=(0.2, 0.4))
        layout.add_widget(child)
        layout.do_layout()
        self.assertEqual(layout.top - child.top, 20)
|
BobBall/sm | refs/heads/master | drivers/LVHDSR.py | 6 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# LVHDSR: VHD on LVM storage repository
#
import SR
import VDI
import SRCommand
import util
import lvutil
import lvmcache
import vhdutil
import lvhdutil
import scsiutil
import time
import os, sys
import xml.dom.minidom
import errno
import xs_errors
import cleanup
import blktap2
from journaler import Journaler
from lock import Lock
from refcounter import RefCounter
from ipc import IPCFlag
from lvmanager import LVActivator
import XenAPI
import re
from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
requiresUpgrade, LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
from metadata import retrieveXMLfromFile, _parseXML
from xmlrpclib import DateTime
import glob
DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX)
geneology = {}
CAPABILITIES = ["SR_PROBE","SR_UPDATE", "SR_TRIM",
"VDI_CREATE","VDI_DELETE","VDI_ATTACH", "VDI_DETACH",
"VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
"VDI_RESET_ON_BOOT/2", "VDI_UPDATE"]
CONFIGURATION = [ ['device', 'local device path (required) (e.g. /dev/sda3)'] ]
DRIVER_INFO = {
'name': 'Local VHD on LVM',
'description': 'SR plugin which represents disks as VHD disks on ' + \
'Logical Volumes within a locally-attached Volume Group',
'vendor': 'XenSource Inc',
'copyright': '(C) 2008 XenSource Inc',
'driver_version': '1.0',
'required_api_version': '1.0',
'capabilities': CAPABILITIES,
'configuration': CONFIGURATION
}
PARAM_VHD = "vhd"
PARAM_RAW = "raw"
OPS_EXCLUSIVE = [
"sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
"sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
"vdi_clone" ]
class LVHDSR(SR.SR):
DRIVER_TYPE = 'lvhd'
THIN_PROVISIONING_DEFAULT = False
THIN_PLUGIN = "lvhd-thin"
PLUGIN_ON_SLAVE = "on-slave"
FLAG_USE_VHD = "use_vhd"
MDVOLUME_NAME = "MGT"
LOCK_RETRY_INTERVAL = 3
LOCK_RETRY_ATTEMPTS = 10
TEST_MODE_KEY = "testmode"
TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
ENV_VAR_VHD_TEST = {
TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
"VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
"VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
TEST_MODE_VHD_FAIL_REPARENT_END:
"VHD_UTIL_TEST_FAIL_REPARENT_END",
TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
"VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
TEST_MODE_VHD_FAIL_RESIZE_DATA:
"VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
TEST_MODE_VHD_FAIL_RESIZE_METADATA:
"VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
TEST_MODE_VHD_FAIL_RESIZE_END:
"VHD_UTIL_TEST_FAIL_RESIZE_END"
}
testMode = ""
legacyMode = True
    def handles(type):
        """Returns True if this SR class understands the given dconf string"""
        # we can pose as LVMSR or EXTSR for compatibility purposes
        # NOTE: dispatch is keyed on the invoking script's name (the driver
        # files are symlinks to this module), not on the class itself.
        if __name__ == '__main__':
            name = sys.argv[0]
        else:
            name = __name__
        if name.endswith("LVMSR"):
            return type == "lvm"
        elif name.endswith("EXTSR"):
            return type == "ext"
        return type == LVHDSR.DRIVER_TYPE
    handles = staticmethod(handles)
def load(self, sr_uuid):
self.ops_exclusive = OPS_EXCLUSIVE
if not self.dconf.has_key('device') or not self.dconf['device']:
raise xs_errors.XenError('ConfigDeviceMissing',)
self.root = self.dconf['device']
for dev in self.root.split(','):
if not self._isvalidpathstring(dev):
raise xs_errors.XenError('ConfigDeviceInvalid', \
opterr='path is %s' % dev)
self.isMaster = False
if self.dconf.has_key('SRmaster') and self.dconf['SRmaster'] == 'true':
self.isMaster = True
self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
self.sr_vditype = SR.DEFAULT_TAP
self.uuid = sr_uuid
self.vgname = lvhdutil.VG_PREFIX + self.uuid
self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname)
self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
self.thinpr = self.THIN_PROVISIONING_DEFAULT
try:
self.lvmCache = lvmcache.LVMCache(self.vgname)
except:
raise xs_errors.XenError('SRUnavailable', \
opterr='Failed to initialise the LVMCache')
self.lvActivator = LVActivator(self.uuid, self.lvmCache)
self.journaler = Journaler(self.lvmCache)
if not self.srcmd.params.get("sr_ref"):
return # must be a probe call
# Test for thick vs thin provisioning conf parameter
if self.dconf.has_key('allocation'):
alloc = self.dconf['allocation']
if alloc == 'thin':
self.thinpr = True
elif alloc == 'thick':
self.thinpr = False
else:
raise xs_errors.XenError('InvalidArg', \
opterr='Allocation parameter must be either thick or thin')
self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
if self.other_conf.get(self.TEST_MODE_KEY):
self.testMode = self.other_conf[self.TEST_MODE_KEY]
self._prepareTestMode()
self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
# sm_config flag overrides PBD
if self.sm_config.get('allocation') == "thick":
self.thinpr = False
elif self.sm_config.get('allocation') == "thin":
self.thinpr = True
if self.sm_config.get(self.FLAG_USE_VHD) == "true":
self.legacyMode = False
if lvutil._checkVG(self.vgname):
if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",
"vdi_activate", "vdi_deactivate"]:
self._undoAllJournals()
if not self.cmd in ["sr_attach","sr_probe"]:
self._checkMetadataVolume()
self.mdexists = False
# get a VDI -> TYPE map from the storage
contains_uuid_regex = \
re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
self.storageVDIs = {}
for key in self.lvmCache.lvs.keys():
# if the lvname has a uuid in it
type = None
if contains_uuid_regex.search(key) != None:
if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):
type = vhdutil.VDI_TYPE_VHD
vdi = key[len(lvhdutil.LV_PREFIX[type]):]
elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]):
type = vhdutil.VDI_TYPE_RAW
vdi = key[len(lvhdutil.LV_PREFIX[type]):]
else:
continue
if type != None:
self.storageVDIs[vdi] = type
# check if metadata volume exists
try:
self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
except:
pass
def cleanup(self):
# we don't need to hold the lock to dec refcounts of activated LVs
if not self.lvActivator.deactivateAll():
raise util.SMException("failed to deactivate LVs")
def updateSRMetadata(self, allocation):
try:
# Add SR specific SR metadata
sr_info = \
{ ALLOCATION_TAG: allocation,
UUID_TAG: self.uuid,
NAME_LABEL_TAG: util.to_plain_string\
(self.session.xenapi.SR.get_name_label(self.sr_ref)),
NAME_DESCRIPTION_TAG: util.to_plain_string\
(self.session.xenapi.SR.get_name_description(self.sr_ref))
}
vdi_info = {}
for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
# Create the VDI entry in the SR metadata
vdi_info[vdi_uuid] = \
{
UUID_TAG: vdi_uuid,
NAME_LABEL_TAG: util.to_plain_string\
(self.session.xenapi.VDI.get_name_label(vdi)),
NAME_DESCRIPTION_TAG: util.to_plain_string\
(self.session.xenapi.VDI.get_name_description(vdi)),
IS_A_SNAPSHOT_TAG: \
int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
SNAPSHOT_OF_TAG: \
self.session.xenapi.VDI.get_snapshot_of(vdi),
SNAPSHOT_TIME_TAG: \
self.session.xenapi.VDI.get_snapshot_time(vdi),
TYPE_TAG: \
self.session.xenapi.VDI.get_type(vdi),
VDI_TYPE_TAG: \
self.session.xenapi.VDI.get_sm_config(vdi)\
['vdi_type'],
READ_ONLY_TAG: \
int(self.session.xenapi.VDI.get_read_only(vdi)),
METADATA_OF_POOL_TAG: \
self.session.xenapi.VDI.get_metadata_of_pool(vdi),
MANAGED_TAG: \
int(self.session.xenapi.VDI.get_managed(vdi))
}
LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
except Exception, e:
raise xs_errors.XenError('MetadataError', \
opterr='Error upgrading SR Metadata: %s' % str(e))
def syncMetadataAndStorage(self):
try:
# if a VDI is present in the metadata but not in the storage
# then delete it from the metadata
vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
for vdi in vdi_info.keys():
update_map = {}
if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()):
# delete this from metadata
LVMMetadataHandler(self.mdpath).\
deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
else:
# search for this in the metadata, compare types
# self.storageVDIs is a map of vdi_uuid to vdi_type
if vdi_info[vdi][VDI_TYPE_TAG] != \
self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
# storage type takes authority
update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
= METADATA_OBJECT_TYPE_VDI
update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
update_map[VDI_TYPE_TAG] = \
self.storageVDIs[vdi_info[vdi][UUID_TAG]]
LVMMetadataHandler(self.mdpath)\
.updateMetadata(update_map)
else:
# This should never happen
pass
except Exception, e:
raise xs_errors.XenError('MetadataError', \
opterr='Error synching SR Metadata and storage: %s' % str(e))
def syncMetadataAndXapi(self):
try:
# get metadata
(sr_info, vdi_info) = \
LVMMetadataHandler(self.mdpath, False).getMetadata()
# First synch SR parameters
self.update(self.uuid)
# Now update the VDI information in the metadata if required
for vdi_offset in vdi_info.keys():
try:
vdi_ref = \
self.session.xenapi.VDI.get_by_uuid(\
vdi_info[vdi_offset][UUID_TAG])
except:
# may be the VDI is not in XAPI yet dont bother
continue
new_name_label = util.to_plain_string\
(self.session.xenapi.VDI.get_name_label(vdi_ref))
new_name_description = util.to_plain_string\
(self.session.xenapi.VDI.get_name_description(vdi_ref))
if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
new_name_description:
update_map = {}
update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
METADATA_OBJECT_TYPE_VDI
update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
update_map[NAME_LABEL_TAG] = new_name_label
update_map[NAME_DESCRIPTION_TAG] = new_name_description
LVMMetadataHandler(self.mdpath)\
.updateMetadata(update_map)
except Exception, e:
raise xs_errors.XenError('MetadataError', \
opterr='Error synching SR Metadata and XAPI: %s' % str(e))
def _checkMetadataVolume(self):
util.SMlog("Entering _checkMetadataVolume")
self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
if self.isMaster:
if self.mdexists and self.cmd == "sr_attach":
try:
# activate the management volume
# will be deactivated at detach time
self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
self._synchSmConfigWithMetaData()
if requiresUpgrade(self.mdpath):
util.SMlog("This SR requires metadata upgrade.")
self.updateSRMetadata( \
self.session.xenapi.SR.get_sm_config(self.sr_ref)\
['allocation']
)
else:
util.SMlog("SR metadata upgrade not required.")
util.SMlog("Sync SR metadata and the state on the storage.")
self.syncMetadataAndStorage()
self.syncMetadataAndXapi()
except Exception, e:
util.SMlog("Exception in _checkMetadataVolume, " \
"Error: %s." % str(e))
elif not self.mdexists and not self.legacyMode:
self._introduceMetaDataVolume()
if self.mdexists:
self.legacyMode = False
def _synchSmConfigWithMetaData(self):
util.SMlog("Synching sm-config with metadata volume")
try:
# get SR info from metadata
sr_info = {}
map = {}
try:
# First try old metadata format
# CHECKME: this can be removed once we stop supporting upgrade
# from pre-6.0 pools
xml = retrieveXMLfromFile(self.mdpath)
sr_info = _parseXML(xml)
except Exception, e:
# That dint work, try new format valid 6.0 onwards
util.SMlog("Could not read SR info from metadata using old " \
"format, trying new format. Error: %s" % str(e))
sr_info = LVMMetadataHandler(self.mdpath,False).getMetadata()[0]
if sr_info == {}:
raise Exception("Failed to get SR information from metadata.")
if sr_info.has_key("allocation"):
if sr_info.get("allocation") == 'thick':
self.thinpr = False
map['allocation'] = 'thick'
elif sr_info.get("allocation") == 'thin':
self.thinpr = True
map['allocation'] = 'thin'
else:
raise Exception("Allocation key not found in SR metadata. " \
"SR info found: %s" % sr_info)
except Exception, e:
raise xs_errors.XenError('MetadataError', \
opterr='Error reading SR params from ' \
'metadata Volume: %s' % str(e))
try:
map[self.FLAG_USE_VHD] = 'true'
self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
except:
raise xs_errors.XenError('MetadataError', \
opterr='Error updating sm_config key')
def _introduceMetaDataVolume(self):
util.SMlog("Creating Metadata volume")
try:
map = {}
self.lvmCache.create(self.MDVOLUME_NAME, 4*1024*1024)
# activate the management volume, will be deactivated at detach time
self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
if self.thinpr:
allocstr = "thin"
else:
allocstr = "thick"
name_label = util.to_plain_string(\
self.session.xenapi.SR.get_name_label(self.sr_ref))
name_description = util.to_plain_string(\
self.session.xenapi.SR.get_name_description(self.sr_ref))
map[self.FLAG_USE_VHD] = "true"
map['allocation'] = allocstr
self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
# Add the SR metadata
self.updateSRMetadata(allocstr)
except Exception, e:
raise xs_errors.XenError('MetadataError', \
opterr='Error introducing Metadata Volume: %s' % str(e))
def _removeMetadataVolume(self):
if self.mdexists:
try:
self.lvmCache.remove(self.MDVOLUME_NAME)
except:
raise xs_errors.XenError('MetadataError', \
opterr='Failed to delete MGT Volume')
def _refresh_size(self):
"""
Refreshs the size of the backing device.
Return true if all paths/devices agree on the same size.
"""
if hasattr(self, 'SCSIid'):
# LVHDoHBASR, LVHDoISCSISR
return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
else:
# LVHDSR
devices = self.root.split(',')
scsiutil.refreshdev(devices)
return True
def _expand_size(self):
"""
Expands the size of the SR by growing into additional availiable
space, if extra space is availiable on the backing device.
Needs to be called after a successful call of _refresh_size.
"""
currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
# We are comparing PV- with VG-sizes that are aligned. Need a threshold
resizethreshold = 100*1024*1024 # 100MB
devices = self.root.split(',')
totaldevicesize = 0
for device in devices:
totaldevicesize = totaldevicesize + scsiutil.getsize(device)
if totaldevicesize >= (currentvgsize + resizethreshold):
try:
if hasattr(self, 'SCSIid'):
# LVHDoHBASR, LVHDoISCSISR might have slaves
scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
getattr(self, 'SCSIid'))
util.SMlog("LVHDSR._expand_size for %s will resize the pv."
% self.uuid)
for device in devices:
lvutil.resizePV(device)
except:
util.logException("LVHDSR._expand_size for %s failed to resize"
" the PV" % self.uuid)
def create(self, uuid, size):
util.SMlog("LVHDSR.create for %s" % self.uuid)
if not self.isMaster:
util.SMlog('sr_create blocked for non-master')
raise xs_errors.XenError('LVMMaster')
if lvutil._checkVG(self.vgname):
raise xs_errors.XenError('SRExists')
# Check none of the devices already in use by other PBDs
if util.test_hostPBD_devs(self.session, uuid, self.root):
raise xs_errors.XenError('SRInUse')
# Check serial number entry in SR records
for dev in self.root.split(','):
if util.test_scsiserial(self.session, dev):
raise xs_errors.XenError('SRInUse')
lvutil.createVG(self.root, self.vgname)
#Update serial number string
scsiutil.add_serial_record(self.session, self.sr_ref, \
scsiutil.devlist_to_serialstring(self.root.split(',')))
# since this is an SR.create turn off legacy mode
self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
self.FLAG_USE_VHD, 'true')
    def delete(self, uuid):
        """Delete the SR: GC everything, tear down dev-mapper entries and
        symlinks, remove the metadata volume, then destroy the VG.

        Master-only. Raises SRNotEmpty if any LVs remain after GC, or a
        generic Exception if any dev-mapper/symlink cleanup step failed.
        """
        util.SMlog("LVHDSR.delete for %s" % self.uuid)
        if not self.isMaster:
            raise xs_errors.XenError('LVMMaster')
        cleanup.gc_force(self.session, self.uuid)
        # best-effort cleanup of all dev-mapper entries belonging to this SR;
        # any failure flips 'success' but the loop continues
        success = True
        for fileName in \
            filter(lambda x: util.extractSRFromDevMapper(x) == self.uuid, \
                   glob.glob(DEV_MAPPER_ROOT + '*')):
            if util.doesFileHaveOpenHandles(fileName):
                util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \
                           "handles" % fileName)
                success = False
                continue
            # Now attempt to remove the dev mapper entry
            if not lvutil.removeDevMapperEntry(fileName):
                success = False
                continue
            try:
                # undo the dev-mapper name mangling ('--' was '-', '-' was '/')
                lvname = os.path.basename(fileName.replace('-','/').\
                                          replace('//', '-'))
                os.unlink(os.path.join(self.path, lvname))
            except Exception, e:
                util.SMlog("LVHDSR.delete: failed to remove the symlink for " \
                           "file %s. Error: %s" % (fileName, str(e)))
                success = False
        # only try to remove the symlink dir once it should be empty
        if success:
            try:
                if util.pathexists(self.path):
                    os.rmdir(self.path)
            except Exception, e:
                util.SMlog("LVHDSR.delete: failed to remove the symlink " \
                           "directory %s. Error: %s" % (self.path, str(e)))
                success = False
        self._removeMetadataVolume()
        self.lvmCache.refresh()
        if len(lvhdutil.getLVInfo(self.lvmCache)) > 0:
            raise xs_errors.XenError('SRNotEmpty')
        if not success:
            raise Exception("LVHDSR delete failed, please refer to the log " \
                "for details.")
        lvutil.removeVG(self.root, self.vgname)
        self._cleanup()
    def attach(self, uuid):
        """Attach the SR: verify the VG, sync metadata state, grow into any
        new space (master), upgrade out of legacy mode if VHD volumes exist,
        and set the I/O scheduler on the backing devices.

        Raises: xs_errors.XenError('SRUnavailable') if the VG is missing.
        """
        util.SMlog("LVHDSR.attach for %s" % self.uuid)
        self._cleanup(True) # in case of host crashes, if detach wasn't called
        if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname):
            raise xs_errors.XenError('SRUnavailable', \
                    opterr='no such volume group: %s' % self.vgname)
        # Refresh the metadata status
        self._checkMetadataVolume()
        refreshsizeok = self._refresh_size()
        if self.isMaster:
            if refreshsizeok:
                self._expand_size()
            #Update SCSIid string
            util.SMlog("Calling devlist_to_serial")
            scsiutil.add_serial_record(self.session, self.sr_ref, \
                    scsiutil.devlist_to_serialstring(self.root.split(',')))
        # Test Legacy Mode Flag and update if VHD volumes exist
        if self.isMaster and self.legacyMode:
            vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
            # NOTE(review): this loop rebinds the 'uuid' parameter; harmless
            # here since the parameter is not used afterwards, but confusing
            for uuid, info in vdiInfo.iteritems():
                if info.vdiType == vhdutil.VDI_TYPE_VHD:
                    self.legacyMode = False
                    map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
                    self._introduceMetaDataVolume()
                    break
        # Set the block scheduler
        for dev in self.root.split(','): self.block_setscheduler(dev)
    def detach(self, uuid):
        """Detach the SR: abort any GC, then best-effort removal of all
        dev-mapper entries and symlinks for this SR's LVs.

        Raises a generic Exception if any cleanup step failed (e.g. open
        handles on a dev-mapper entry).
        """
        util.SMlog("LVHDSR.detach for %s" % self.uuid)
        cleanup.abort(self.uuid)
        # Do a best effort cleanup of the dev mapper entries
        # go through all devmapper entries for this VG
        success = True
        for fileName in \
            filter(lambda x: util.extractSRFromDevMapper(x) == self.uuid, \
                   glob.glob(DEV_MAPPER_ROOT + '*')):
            # check if any file has open handles
            if util.doesFileHaveOpenHandles(fileName):
                # if yes, log this and signal failure
                util.SMlog("LVHDSR.detach: The dev mapper entry %s has open " \
                           "handles" % fileName)
                success = False
                continue
            # Now attempt to remove the dev mapper entry
            if not lvutil.removeDevMapperEntry(fileName):
                success = False
                continue
            # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
            try:
                # undo the dev-mapper name mangling ('--' was '-', '-' was '/')
                lvname = os.path.basename(fileName.replace('-','/').\
                                          replace('//', '-'))
                lvname = os.path.join(self.path, lvname)
                util.silent_noent(lvname)
            except Exception, e:
                util.SMlog("LVHDSR.detach: failed to remove the symlink for " \
                           "file %s. Error: %s" % (fileName, str(e)))
                success = False
        # now remove the directory where the symlinks are
        # this should pass as the directory should be empty by now
        if success:
            try:
                if util.pathexists(self.path):
                    os.rmdir(self.path)
            except Exception, e:
                util.SMlog("LVHDSR.detach: failed to remove the symlink " \
                           "directory %s. Error: %s" % (self.path, str(e)))
                success = False
        if not success:
            raise Exception("SR detach failed, please refer to the log " \
                            "for details.")
        # Don't delete lock files on the master as it will break the locking
        # between SM and any GC thread that survives through SR.detach.
        # However, we should still delete lock files on slaves as it is the
        # only place to do so.
        self._cleanup(self.isMaster)
    def forget_vdi(self, uuid):
        """Forget a VDI: drop it from the SR metadata volume (when not in
        legacy mode), then delegate to the base-class XAPI forget."""
        if not self.legacyMode:
            LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
        super(LVHDSR, self).forget_vdi(uuid)
    def scan(self, uuid):
        """Scan the SR (master only): refresh sizes and the LVM cache, load
        VDIs, update physical stats, and reconcile the metadata volume with
        XAPI — introducing into XAPI any VDI found only in metadata and
        fixing up snapshot-of relationships.
        """
        try:
            lvname = ''
            # NOTE(review): 'activated' starts True although nothing has been
            # activated yet; it is effectively gated by lvname != '' in the
            # finally block — confirm intent
            activated = True
            util.SMlog("LVHDSR.scan for %s" % self.uuid)
            if not self.isMaster:
                util.SMlog('sr_scan blocked for non-master')
                raise xs_errors.XenError('LVMMaster')
            if self._refresh_size():
                self._expand_size()
            self.lvmCache.refresh()
            self._loadvdis()
            stats = lvutil._getVGstats(self.vgname)
            self.physical_size = stats['physical_size']
            self.physical_utilisation = stats['physical_utilisation']
            # Now check if there are any VDIs in the metadata, which are not in
            # XAPI
            if self.mdexists:
                vdiToSnaps = {}
                # get VDIs from XAPI
                vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
                vdi_uuids = set([])
                for vdi in vdis:
                    vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
                # metadata map: per-VDI record keyed by metadata entry
                Dict = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
                for vdi in Dict.keys():
                    vdi_uuid = Dict[vdi][UUID_TAG]
                    # remember snapshot relationships for the fix-up pass below
                    if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])):
                        if vdiToSnaps.has_key(Dict[vdi][SNAPSHOT_OF_TAG]):
                            vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
                        else:
                            vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
                    if vdi_uuid not in vdi_uuids:
                        util.SMlog("Introduce VDI %s as it is present in " \
                                   "metadata and not in XAPI." % vdi_uuid)
                        sm_config = {}
                        sm_config['vdi_type'] = Dict[vdi][VDI_TYPE_TAG]
                        lvname = "%s%s" % \
                            (lvhdutil.LV_PREFIX[sm_config['vdi_type']],vdi_uuid)
                        # activate so we can inspect the LV/VHD; deactivated
                        # in the finally block
                        self.lvmCache.activateNoRefcount(lvname)
                        activated = True
                        lvPath = os.path.join(self.path, lvname)
                        if Dict[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW:
                            size = self.lvmCache.getSize( \
                                lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \
                                    vdi_uuid)
                            utilisation = \
                                        util.roundup(lvutil.LVM_SIZE_INCREMENT,
                                                      long(size))
                        else:
                            parent = \
                                vhdutil._getVHDParentNoCheck(lvPath)
                            if parent != None:
                                sm_config['vhd-parent'] = parent[len( \
                                    lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):]
                            size = vhdutil.getSizeVirt(lvPath)
                            if self.thinpr:
                                utilisation = \
                                    util.roundup(lvutil.LVM_SIZE_INCREMENT,
                                      vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
                            else:
                                utilisation = lvhdutil.calcSizeVHDLV(long(size))
                        vdi_ref = self.session.xenapi.VDI.db_introduce(
                                        vdi_uuid,
                                        Dict[vdi][NAME_LABEL_TAG],
                                        Dict[vdi][NAME_DESCRIPTION_TAG],
                                        self.sr_ref,
                                        Dict[vdi][TYPE_TAG],
                                        False,
                                        bool(int(Dict[vdi][READ_ONLY_TAG])),
                                        {},
                                        vdi_uuid,
                                        {},
                                        sm_config)
                        self.session.xenapi.VDI.set_managed(vdi_ref,
                                                    bool(int(Dict[vdi][MANAGED_TAG])))
                        self.session.xenapi.VDI.set_virtual_size(vdi_ref,
                                                                 str(size))
                        self.session.xenapi.VDI.set_physical_utilisation( \
                            vdi_ref, str(utilisation))
                        self.session.xenapi.VDI.set_is_a_snapshot( \
                            vdi_ref, bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])))
                        if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])):
                            self.session.xenapi.VDI.set_snapshot_time( \
                                vdi_ref, DateTime(Dict[vdi][SNAPSHOT_TIME_TAG]))
                        if Dict[vdi][TYPE_TAG] == 'metadata':
                            self.session.xenapi.VDI.set_metadata_of_pool( \
                                vdi_ref, Dict[vdi][METADATA_OF_POOL_TAG])
                # Now set the snapshot statuses correctly in XAPI
                for srcvdi in vdiToSnaps.keys():
                    try:
                        srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
                    except:
                        # the source VDI no longer exists, continue
                        continue
                    for snapvdi in vdiToSnaps[srcvdi]:
                        try:
                            # this might fail in cases where its already set
                            snapref = \
                                self.session.xenapi.VDI.get_by_uuid(snapvdi)
                            self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
                        except Exception, e:
                            util.SMlog("Setting snapshot failed. "\
                                       "Error: %s" % str(e))
            ret = super(LVHDSR, self).scan(uuid)
            self._kickGC()
            return ret
        finally:
            if lvname != '' and activated:
                self.lvmCache.deactivateNoRefcount(lvname)
def update(self, uuid):
if not lvutil._checkVG(self.vgname):
return
self._updateStats(uuid, 0)
if self.legacyMode:
return
# synch name_label in metadata with XAPI
update_map = {}
update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
METADATA_OBJECT_TYPE_SR,
NAME_LABEL_TAG: util.to_plain_string( \
self.session.xenapi.SR.get_name_label(self.sr_ref)),
NAME_DESCRIPTION_TAG: util.to_plain_string( \
self.session.xenapi.SR.get_name_description(self.sr_ref))
}
LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
def _updateStats(self, uuid, virtAllocDelta):
valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
self.virtual_allocation = valloc + virtAllocDelta
stats = lvutil._getVGstats(self.vgname)
self.physical_size = stats['physical_size']
self.physical_utilisation = stats['physical_utilisation']
self._db_update()
def probe(self):
return lvutil.srlist_toxml(
lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.root),
lvhdutil.VG_PREFIX,
(self.srcmd.params['sr_sm_config'].has_key('metadata') and \
self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
    def vdi(self, uuid):
        """Factory hook: return the LVHDVDI object for the given VDI uuid."""
        return LVHDVDI(self, uuid)
    def _loadvdis(self):
        """Populate self.vdis/self.allVDIs from the LVM cache, accumulate
        virtual allocation, record parent/child links in the module-level
        'geneology' map, and drop hidden leaf VDIs (garbage awaiting GC).

        Raises: xs_errors.XenError('VDIUnavailable') on a VDI scan error.
        """
        self.virtual_allocation = 0
        self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
        self.allVDIs = {}
        for uuid, info in self.vdiInfo.iteritems():
            # LVs mid-rename by the GC are transient; skip them
            if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):
                continue
            if info.scanError:
                raise xs_errors.XenError('VDIUnavailable', \
                        opterr='Error scanning VDI %s' % uuid)
            self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
            # hidden VDIs (coalesced-away nodes) don't count towards allocation
            if not self.vdis[uuid].hidden:
                self.virtual_allocation += self.vdis[uuid].utilisation
        for uuid, vdi in self.vdis.iteritems():
            if vdi.parent:
                if self.vdis.has_key(vdi.parent):
                    self.vdis[vdi.parent].read_only = True
                if geneology.has_key(vdi.parent):
                    geneology[vdi.parent].append(uuid)
                else:
                    geneology[vdi.parent] = [uuid]
        # Now remove all hidden leaf nodes to avoid introducing records that
        # will be GC'ed
        for uuid in self.vdis.keys():
            if not geneology.has_key(uuid) and self.vdis[uuid].hidden:
                util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
                del self.vdis[uuid]
def _ensureSpaceAvailable(self, amount_needed):
space_available = lvutil._getVGstats(self.vgname)['freespace']
if (space_available < amount_needed):
util.SMlog("Not enough space! free space: %d, need: %d" % \
(space_available, amount_needed))
raise xs_errors.XenError('SRNoSpace')
def _handleInterruptedCloneOps(self):
entries = self.journaler.getAll(LVHDVDI.JRN_CLONE)
for uuid, val in entries.iteritems():
util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone",self.uuid)
self._handleInterruptedCloneOp(uuid, val)
util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone",self.uuid)
self.journaler.remove(LVHDVDI.JRN_CLONE, uuid)
def _handleInterruptedCoalesceLeaf(self):
entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
if len(entries) > 0:
util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
cleanup.gc_force(self.session, self.uuid)
self.lvmCache.refresh()
    def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo = False):
        """Either roll back or finalize the interrupted snapshot/clone
        operation. Rolling back is unsafe if the leaf VHDs have already been
        in use and written to. However, it is always safe to roll back while
        we're still in the context of the failed snapshot operation since the
        VBD is paused for the duration of the operation

        origUuid: uuid of the VDI that was being snapshotted/cloned
        jval:     journal value, "<baseUuid>_<clonUuid>" (clonUuid may be '')
        forceUndo: always roll back, regardless of leaf state
        """
        util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
        lvs = lvhdutil.getLVInfo(self.lvmCache)
        baseUuid, clonUuid = jval.split("_")
        # is there a "base copy" VDI?
        if not lvs.get(baseUuid):
            # no base copy: make sure the original is there
            if lvs.get(origUuid):
                util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
                return
            raise util.SMException("base copy %s not present, "\
                    "but no original %s found" % (baseUuid, origUuid))
        if forceUndo:
            util.SMlog("Explicit revert")
            self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
            return
        # any missing or invalid leaf means the snapshot never completed
        if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
            util.SMlog("One or both leaves missing => revert")
            self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
            return
        vdis = lvhdutil.getVDIInfo(self.lvmCache)
        if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
            util.SMlog("One or both leaves invalid => revert")
            self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
            return
        orig = vdis[origUuid]
        base = vdis[baseUuid]
        # activate all involved LVs so the VHD chains can be validated
        self.lvActivator.activate(baseUuid, base.lvName, False)
        self.lvActivator.activate(origUuid, orig.lvName, False)
        if orig.parentUuid != baseUuid:
            parent = vdis[orig.parentUuid]
            self.lvActivator.activate(parent.uuid, parent.lvName, False)
        origPath = os.path.join(self.path, orig.lvName)
        if not vhdutil.check(origPath):
            util.SMlog("Orig VHD invalid => revert")
            self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
            return
        if clonUuid:
            clon = vdis[clonUuid]
            clonPath = os.path.join(self.path, clon.lvName)
            self.lvActivator.activate(clonUuid, clon.lvName, False)
            if not vhdutil.check(clonPath):
                util.SMlog("Clon VHD invalid => revert")
                self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
                return
        util.SMlog("Snapshot appears valid, will not roll back")
        self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid)
    def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid):
        """Roll back an interrupted snapshot/clone: un-hide the base copy,
        delete the leaf LVs, re-inflate the base to full size, rename it back
        to the original's name and restore refcounts; finally refresh LVM
        metadata on any slaves the original is attached on.

        Statement order here is load-bearing — do not reorder.
        """
        base = lvs[baseUuid]
        basePath = os.path.join(self.path, base.name)
        # make the parent RW
        if base.readonly:
            self.lvmCache.setReadonly(base.name, False)
        ns = lvhdutil.NS_PREFIX_LVM + self.uuid
        origRefcountBinary = RefCounter.check(origUuid, ns)[1]
        origRefcountNormal = 0
        # un-hide the parent
        if base.vdiType == vhdutil.VDI_TYPE_VHD:
            self.lvActivator.activate(baseUuid, base.name, False)
            origRefcountNormal = 1
            vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False)
        if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden:
            vhdutil.setHidden(basePath, False)
        elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden:
            self.lvmCache.setHidden(base.name, False)
        # remove the child nodes
        if clonUuid and lvs.get(clonUuid):
            if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD:
                raise util.SMException("clone %s not VHD" % clonUuid)
            self.lvmCache.remove(lvs[clonUuid].name)
            if self.lvActivator.get(clonUuid, False):
                self.lvActivator.remove(clonUuid, False)
        if lvs.get(origUuid):
            self.lvmCache.remove(lvs[origUuid].name)
        # inflate the parent to fully-allocated size
        if base.vdiType == vhdutil.VDI_TYPE_VHD:
            fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
            lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize)
        # rename back
        origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid
        self.lvmCache.rename(base.name, origLV)
        RefCounter.reset(baseUuid, ns)
        if self.lvActivator.get(baseUuid, False):
            self.lvActivator.replace(baseUuid, origUuid, origLV, False)
        RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
        # At this stage, tapdisk and SM vdi will be in paused state. Remove
        # flag to facilitate vm deactivate
        origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
        self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
        # update LVM metadata on slaves
        slaves = util.get_slaves_attached_on(self.session, [origUuid])
        lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname,
                origLV, origUuid, slaves)
        util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
    def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid):
        """Finalize the interrupted snapshot/clone operation. This must not be
        called from the live snapshot op context because we attempt to pause/
        unpause the VBD here (the VBD is already paused during snapshot, so it
        would cause a deadlock)

        vdis: VDI info map from lvhdutil.getVDIInfo
        origUuid/baseUuid/clonUuid: the three VDIs involved in the clone
        (clonUuid may be '' for a single-leaf snapshot)
        """
        base = vdis[baseUuid]
        clon = None
        if clonUuid:
            clon = vdis[clonUuid]
        cleanup.abort(self.uuid)
        # make sure the parent is hidden and read-only
        if not base.hidden:
            if base.vdiType == vhdutil.VDI_TYPE_RAW:
                self.lvmCache.setHidden(base.lvName)
            else:
                basePath = os.path.join(self.path, base.lvName)
                vhdutil.setHidden(basePath)
        if not base.lvReadonly:
            self.lvmCache.setReadonly(base.lvName, True)
        # NB: since this snapshot-preserving call is only invoked outside the
        # snapshot op context, we assume the LVM metadata on the involved slave
        # has by now been refreshed and do not attempt to do it here
        # Update the original record
        try:
            vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            type = self.session.xenapi.VDI.get_type(vdi_ref)
            sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
            sm_config['vhd-parent'] = baseUuid
            self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
        except XenAPI.Failure:
            util.SMlog("ERROR updating the orig record")
        # introduce the new VDI records
        if clonUuid:
            try:
                clon_vdi = VDI.VDI(self, clonUuid)
                clon_vdi.read_only = False
                clon_vdi.location = clonUuid
                clon_vdi.utilisation = clon.sizeLV
                clon_vdi.sm_config = {
                        "vdi_type": vhdutil.VDI_TYPE_VHD,
                        "vhd-parent": baseUuid }
                if not self.legacyMode:
                    LVMMetadataHandler(self.mdpath).\
                        ensureSpaceIsAvailableForVdis(1)
                clon_vdi_ref = clon_vdi._db_introduce()
                util.SMlog("introduced clon VDI: %s (%s)" % \
                        (clon_vdi_ref, clonUuid))
                vdi_info = { UUID_TAG: clonUuid,
                                NAME_LABEL_TAG: clon_vdi.label,
                                NAME_DESCRIPTION_TAG: clon_vdi.description,
                                IS_A_SNAPSHOT_TAG: 0,
                                SNAPSHOT_OF_TAG: '',
                                SNAPSHOT_TIME_TAG: '',
                                TYPE_TAG: type,
                                VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
                                READ_ONLY_TAG: int(clon_vdi.read_only),
                                MANAGED_TAG: int(clon_vdi.managed),
                                METADATA_OF_POOL_TAG: ''
                }
                if not self.legacyMode:
                    LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
            except XenAPI.Failure:
                util.SMlog("ERROR introducing the clon record")
        try:
            base_vdi = VDI.VDI(self, baseUuid) # readonly parent
            base_vdi.label = "base copy"
            base_vdi.read_only = True
            base_vdi.location = baseUuid
            base_vdi.size = base.sizeVirt
            base_vdi.utilisation = base.sizeLV
            base_vdi.managed = False
            base_vdi.sm_config = {
                    "vdi_type": vhdutil.VDI_TYPE_VHD,
                    "vhd-parent": baseUuid }
            if not self.legacyMode:
                LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
            base_vdi_ref = base_vdi._db_introduce()
            util.SMlog("introduced base VDI: %s (%s)" % \
                    (base_vdi_ref, baseUuid))
            vdi_info = { UUID_TAG: baseUuid,
                            NAME_LABEL_TAG: base_vdi.label,
                            NAME_DESCRIPTION_TAG: base_vdi.description,
                            IS_A_SNAPSHOT_TAG: 0,
                            SNAPSHOT_OF_TAG: '',
                            SNAPSHOT_TIME_TAG: '',
                            TYPE_TAG: type,
                            VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
                            READ_ONLY_TAG: int(base_vdi.read_only),
                            MANAGED_TAG: int(base_vdi.managed),
                            METADATA_OF_POOL_TAG: ''
            }
            if not self.legacyMode:
                LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
        except XenAPI.Failure:
            util.SMlog("ERROR introducing the base record")
        util.SMlog("*** INTERRUPTED CLONE OP: complete")
def _undoAllJournals(self):
"""Undo all VHD & SM interrupted journaled operations. This call must
be serialized with respect to all operations that create journals"""
# undoing interrupted inflates must be done first, since undoing VHD
# ops might require inflations
self.lock.acquire()
try:
self._undoAllInflateJournals()
self._undoAllVHDJournals()
self._handleInterruptedCloneOps()
self._handleInterruptedCoalesceLeaf()
finally:
self.lock.release()
self.cleanup()
    def _undoAllInflateJournals(self):
        """Deflate any VDI that has an interrupted-inflate journal entry back
        to its journaled size, zeroing the stale VHD footer location first,
        then drop the journal entries and the cached VDI info."""
        entries = self.journaler.getAll(lvhdutil.JRN_INFLATE)
        if len(entries) == 0:
            return
        self._loadvdis()
        for uuid, val in entries.iteritems():
            vdi = self.vdis.get(uuid)
            if vdi:
                util.SMlog("Found inflate journal %s, deflating %s to %s" % \
                        (uuid, vdi.path, val))
                # temporarily make writable so we can zero/deflate
                if vdi.readonly:
                    self.lvmCache.setReadonly(vdi.lvname, False)
                self.lvActivator.activate(uuid, vdi.lvname, False)
                currSizeLV = self.lvmCache.getSize(vdi.lvname)
                # wipe the footer left at the inflated end before shrinking
                util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE,
                        vhdutil.VHD_FOOTER_SIZE)
                lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val))
                if vdi.readonly:
                    self.lvmCache.setReadonly(vdi.lvname, True)
                if "true" == self.session.xenapi.SR.get_shared(self.sr_ref):
                    lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
                            self.vgname, vdi.lvname, uuid)
            self.journaler.remove(lvhdutil.JRN_INFLATE, uuid)
        # invalidate cached VDI state populated by _loadvdis above
        delattr(self,"vdiInfo")
        delattr(self,"allVDIs")
    def _undoAllVHDJournals(self):
        """check if there are VHD journals in existence and revert them

        Each journaled VHD is inflated to full size, reverted from its
        journal LV, deflated back if possible, and the journal LV removed.
        """
        journals = lvhdutil.getAllVHDJournals(self.lvmCache)
        if len(journals) == 0:
            return
        self._loadvdis()
        for uuid, jlvName in journals:
            vdi = self.vdis[uuid]
            util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path))
            self.lvActivator.activate(uuid, vdi.lvname, False)
            self.lvmCache.activateNoRefcount(jlvName)
            # revert requires the LV at fully-allocated size
            fullSize = lvhdutil.calcSizeVHDLV(vdi.size)
            lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize)
            try:
                jFile = os.path.join(self.path, jlvName)
                vhdutil.revert(vdi.path, jFile)
            except util.CommandException:
                util.logException("VHD journal revert")
                # if the VHD itself is still consistent, the journal is moot
                vhdutil.check(vdi.path)
                util.SMlog("VHD revert failed but VHD ok: removing journal")
            # Attempt to reclaim unused space
            vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False)
            NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
            if NewSize < fullSize:
                lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
            lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
                    self.vgname, vdi.lvname, uuid)
            self.lvmCache.remove(jlvName)
        # invalidate cached VDI state populated by _loadvdis above
        delattr(self,"vdiInfo")
        delattr(self,"allVDIs")
    def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
                             baseUuid, baseLV):
        """We need to reactivate the original LV on each slave (note that the
        name for the original LV might change), as well as init the refcount
        for the base LV

        hostRefs: hosts (XAPI refs) the VDI is attached on; the master is
        skipped since it is updated directly.
        """
        args = {"vgName" : self.vgname,
                "action1": "deactivateNoRefcount",
                "lvName1": origOldLV,
                "action2": "refresh",
                "lvName2": origLV,
                "action3": "activate",
                "ns3"    : lvhdutil.NS_PREFIX_LVM + self.uuid,
                "lvName3": baseLV,
                "uuid3"  : baseUuid}
        masterRef = util.get_this_host_ref(self.session)
        for hostRef in hostRefs:
            if hostRef == masterRef:
                continue
            util.SMlog("Updating %s, %s, %s on slave %s" % \
                    (origOldLV, origLV, baseLV, hostRef))
            # NOTE(review): eval() of the plugin's return value — the plugin
            # is trusted (our own on-slave helper), but a safer literal
            # parser would be preferable; flagging, not changing
            rv = eval(self.session.xenapi.host.call_plugin(
                hostRef, self.PLUGIN_ON_SLAVE, "multi", args))
            util.SMlog("call-plugin returned: %s" % rv)
            if not rv:
                raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
def _cleanup(self, skipLockCleanup = False):
"""delete stale refcounter, flag, and lock files"""
RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
IPCFlag(self.uuid).clearAll()
if not skipLockCleanup:
Lock.cleanupAll(self.uuid)
Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
def _prepareTestMode(self):
util.SMlog("Test mode: %s" % self.testMode)
if self.ENV_VAR_VHD_TEST.get(self.testMode):
os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
    def _kickGC(self):
        # don't bother if an instance already running (this is just an
        # optimization to reduce the overhead of forking a new process if we
        # don't have to, but the process will check the lock anyways)
        lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
        if not lockRunning.acquireNoblock():
            if cleanup.should_preempt(self.session, self.uuid):
                util.SMlog("Aborting currently-running coalesce of garbage VDI")
                try:
                    if not cleanup.abort(self.uuid, soft=True):
                        util.SMlog("The GC has already been scheduled to "
                                "re-start")
                except util.CommandException, e:
                    if e.code != errno.ETIMEDOUT:
                        # NOTE(review): this re-raise is swallowed by the
                        # 'finally: return' below — looks intentional
                        # (best-effort abort) but worth confirming
                        raise
                    util.SMlog('failed to abort the GC')
                finally:
                    # always fall through: never block the caller on GC abort
                    return
            else:
                util.SMlog("A GC instance already running, not kicking")
                return
        else:
            lockRunning.release()
        util.SMlog("Kicking GC")
        cleanup.gc(self.session, self.uuid, True)
class LVHDVDI(VDI.VDI):
    """A VDI backed by an LV in an LVHD SR, stored either RAW or as a VHD."""
    # 2TB - (Pad/bitmap + BAT + Header/footer)
    # 2 * 1024 * 1024 - (4096 + 4 + 0.002) in MiB
    MAX_VDI_SIZE_MB = 2093051
    # virtual sizes are rounded up to this granularity (bytes)
    VHD_SIZE_INC = 2 * 1024 * 1024
    # smallest virtual size a VDI may be created with (bytes)
    MIN_VIRT_SIZE = 2 * 1024 * 1024
    SNAPSHOT_SINGLE = 1 # true snapshot: 1 leaf, 1 read-only parent
    SNAPSHOT_DOUBLE = 2 # regular snapshot/clone that creates 2 leaves
    SNAPSHOT_INTERNAL = 3 # SNAPSHOT_SINGLE but don't update SR's virtual allocation
    JRN_CLONE = "clone" # journal entry type for the clone operation
    def load(self, vdi_uuid):
        """Initialize this VDI object: determine its type (RAW/VHD), lv name
        and path, either from the SR's cached scan info or by probing, and
        fall back to vdi_sm_config for a VDI that is about to be created."""
        self.lock = self.sr.lock
        self.lvActivator = self.sr.lvActivator
        self.loaded = False
        self.vdi_type = vhdutil.VDI_TYPE_VHD
        # legacy SRs only support RAW VDIs
        if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"):
            self.vdi_type = vhdutil.VDI_TYPE_RAW
        self.uuid = vdi_uuid
        self.location = self.uuid
        self.exists = True
        # fast path: SR scan already collected this VDI's info
        if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
            self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
            if self.parent:
                self.sm_config_override['vhd-parent'] = self.parent
            else:
                self.sm_config_override['vhd-parent'] = None
            return
        # scan() didn't run: determine the type of the VDI manually
        if self._determineType():
            return
        # the VDI must be in the process of being created
        self.exists = False
        if self.sr.srcmd.params.has_key("vdi_sm_config") and \
                self.sr.srcmd.params["vdi_sm_config"].has_key("type"):
            type = self.sr.srcmd.params["vdi_sm_config"]["type"]
            if type == PARAM_RAW:
                self.vdi_type = vhdutil.VDI_TYPE_RAW
            elif type == PARAM_VHD:
                self.vdi_type = vhdutil.VDI_TYPE_VHD
                if self.sr.cmd == 'vdi_create' and self.sr.legacyMode:
                    raise xs_errors.XenError('VDICreate', \
                        opterr='Cannot create VHD type disk in legacy mode')
            else:
                raise xs_errors.XenError('VDICreate', opterr='bad type')
        self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid)
        self.path = os.path.join(self.sr.path, self.lvname)
    def create(self, sr_uuid, vdi_uuid, size):
        """Create the VDI (master only): allocate the LV (and VHD header for
        VHD type), introduce it into XAPI and — unless in legacy mode — the
        SR metadata volume. Returns the XAPI VDI params string.

        Raises: LVMMaster, VDIExists, VDISize, VDICreate.
        """
        util.SMlog("LVHDVDI.create for %s" % self.uuid)
        if not self.sr.isMaster:
            raise xs_errors.XenError('LVMMaster')
        if self.exists:
            raise xs_errors.XenError('VDIExists')
        if size / 1024 / 1024 > self.MAX_VDI_SIZE_MB:
            raise xs_errors.XenError('VDISize',
                    opterr="VDI size cannot exceed %d MB" % \
                            self.MAX_VDI_SIZE_MB)
        # clamp and round the virtual size to the VHD granularity
        if size < self.MIN_VIRT_SIZE:
            size = self.MIN_VIRT_SIZE
        size = util.roundup(self.VHD_SIZE_INC, size)
        util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" %\
                (self.vdi_type, self.path, size))
        lvSize = 0
        if self.vdi_type == vhdutil.VDI_TYPE_RAW:
            lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, long(size))
        else:
            if self.sr.thinpr:
                # thin: only reserve space for an empty VHD for now
                lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT,
                        vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
            else:
                lvSize = lvhdutil.calcSizeVHDLV(long(size))
        self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
        self.sr._ensureSpaceAvailable(lvSize)
        try:
            self.sr.lvmCache.create(self.lvname, lvSize)
            if self.vdi_type == vhdutil.VDI_TYPE_RAW:
                self.size = self.sr.lvmCache.getSize(self.lvname)
            else:
                vhdutil.create(self.path, long(size), False, lvhdutil.MSIZE_MB)
                self.size = vhdutil.getSizeVirt(self.path)
            self.sr.lvmCache.deactivateNoRefcount(self.lvname)
        except util.CommandException, e:
            util.SMlog("Unable to create VDI");
            # roll back the LV on failure
            self.sr.lvmCache.remove(self.lvname)
            raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
        self.utilisation = lvSize
        self.sm_config["vdi_type"] = self.vdi_type
        if not self.sr.legacyMode:
            LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
        self.ref = self._db_introduce()
        self.sr._updateStats(self.sr.uuid, self.size)
        vdi_info = { UUID_TAG: self.uuid,
                        NAME_LABEL_TAG: util.to_plain_string(self.label),
                        NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
                        IS_A_SNAPSHOT_TAG: 0,
                        SNAPSHOT_OF_TAG: '',
                        SNAPSHOT_TIME_TAG: '',
                        TYPE_TAG: self.ty,
                        VDI_TYPE_TAG: self.vdi_type,
                        READ_ONLY_TAG: int(self.read_only),
                        MANAGED_TAG: int(self.managed),
                        METADATA_OF_POOL_TAG: ''
        }
        if not self.sr.legacyMode:
            LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
        return VDI.VDI.get_params(self)
    def delete(self, sr_uuid, vdi_uuid):
        """Delete the VDI: mark it hidden, forget the XAPI record, and let
        the GC reclaim the actual LV later.

        Raises: VDIDelete if the VDI is a non-leaf (unmanaged) node.
        """
        util.SMlog("LVHDVDI.delete for %s" % self.uuid)
        try:
            self._loadThis()
        except SR.SRException, e:
            # Catch 'VDI doesn't exist' exception
            if e.errno == 46:
                return
            raise
        vdi_ref = self.sr.srcmd.params['vdi_ref']
        if not self.session.xenapi.VDI.get_managed(vdi_ref):
            raise xs_errors.XenError("VDIDelete", \
                          opterr="Deleting non-leaf node not permitted")
        # hiding the VDI marks it as garbage for the GC to collect
        if not self.hidden:
            self._markHidden()
        self._db_forget()
        # deactivate here because it might be too late to do it in the "final"
        # step: GC might have removed the LV by then
        if self.sr.lvActivator.get(self.uuid, False):
            self.sr.lvActivator.deactivate(self.uuid, False)
        self.sr._updateStats(self.sr.uuid, -self.size)
        self.sr._kickGC()
    def attach(self, sr_uuid, vdi_uuid):
        """Attach the VDI: inflate a thin VHD to full size when attaching
        writable, then do the actual attach.

        Raises: VDIUnavailable if interrupted operations (journals) exist,
        LVMProvisionAttach if inflation fails.
        """
        util.SMlog("LVHDVDI.attach for %s" % self.uuid)
        # refuse to attach until a scan has repaired any journaled ops
        if self.sr.journaler.hasJournals(self.uuid):
            raise xs_errors.XenError('VDIUnavailable',
                    opterr='Interrupted operation detected on this VDI, '
                    'scan SR first to trigger auto-repair')
        writable = ('args' not in self.sr.srcmd.params) or \
                (self.sr.srcmd.params['args'][0] == "true")
        # only a writable, thin, not-yet-fully-inflated VHD needs inflating
        needInflate = True
        if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable:
            needInflate = False
        else:
            self._loadThis()
            if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size):
                needInflate = False
        if needInflate:
            try:
                self._prepareThin(True)
            except:
                util.logException("attach")
                raise xs_errors.XenError('LVMProvisionAttach')
        try:
            return self._attach()
        finally:
            if not self.sr.lvActivator.deactivateAll():
                util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
def detach(self, sr_uuid, vdi_uuid):
    """Detach the VDI, deflating the LV back to its minimal size when
    thin provisioning is in effect (snapshots are always deflated)."""
    util.SMlog("LVHDVDI.detach for %s" % self.uuid)
    self._loadThis()
    already_deflated = (self.utilisation < \
            lvhdutil.calcSizeVHDLV(self.size))
    needDeflate = True
    if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated:
        # raw LVs never change size; nothing to deflate
        needDeflate = False
    elif not self.sr.thinpr:
        # thick-provisioned SR keeps VDIs fully inflated...
        needDeflate = False
        # except for snapshots, which are always deflated
        vdi_ref = self.sr.srcmd.params['vdi_ref']
        snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
        if snap:
            needDeflate = True
    if needDeflate:
        try:
            self._prepareThin(False)
        except:
            util.logException("_prepareThin")
            raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
    try:
        self._detach()
    finally:
        if not self.sr.lvActivator.deactivateAll():
            raise xs_errors.XenError("SMGeneral", opterr="deactivation")
# We only support offline resize
def resize(self, sr_uuid, vdi_uuid, size):
    """Grow the VDI's virtual size (master only; shrinking refused).

    The requested size is rounded up to the VHD size increment; for
    VHD VDIs the LV is inflated first, then the virtual size inside
    the VHD is updated. XAPI's record is updated to match.
    """
    util.SMlog("LVHDVDI.resize for %s" % self.uuid)
    if not self.sr.isMaster:
        raise xs_errors.XenError('LVMMaster')
    # (Python 2 integer division)
    if size / 1024 / 1024 > self.MAX_VDI_SIZE_MB:
        raise xs_errors.XenError('VDISize',
                opterr="VDI size cannot exceed %d MB" % \
                self.MAX_VDI_SIZE_MB)
    self._loadThis()
    if self.hidden:
        raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
    if size < self.size:
        util.SMlog('vdi_resize: shrinking not supported: ' + \
                '(current size: %d, new size: %d)' % (self.size, size))
        raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
    if size == self.size:
        # no-op resize: just return the current params
        return VDI.VDI.get_params(self)
    size = util.roundup(self.VHD_SIZE_INC, size)
    if self.vdi_type == vhdutil.VDI_TYPE_RAW:
        lvSizeOld = self.size
        lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
    else:
        lvSizeOld = self.utilisation
        lvSizeNew = lvhdutil.calcSizeVHDLV(size)
        if self.sr.thinpr:
            # VDI is currently deflated, so keep it deflated
            lvSizeNew = lvSizeOld
    assert(lvSizeNew >= lvSizeOld)
    spaceNeeded = lvSizeNew - lvSizeOld
    self.sr._ensureSpaceAvailable(spaceNeeded)
    oldSize = self.size
    if self.vdi_type == vhdutil.VDI_TYPE_RAW:
        self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
        self.size = self.sr.lvmCache.getSize(self.lvname)
        self.utilisation = self.size
    else:
        if lvSizeNew != lvSizeOld:
            # grow the LV first (journaled), then the VHD virtual size
            lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid,
                    lvSizeNew)
        vhdutil.setSizeVirtFast(self.path, size)
        self.size = vhdutil.getSizeVirt(self.path)
        self.utilisation = self.sr.lvmCache.getSize(self.lvname)
    vdi_ref = self.sr.srcmd.params['vdi_ref']
    self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
    self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
            str(self.utilisation))
    self.sr._updateStats(self.sr.uuid, self.size - oldSize)
    return VDI.VDI.get_params(self)
def snapshot(self, sr_uuid, vdi_uuid):
    """Take a snapshot of this VDI.

    Logically, "snapshot" should mean SNAPSHOT_SINGLE and "clone"
    SNAPSHOT_DOUBLE, but in practice a double snapshot is performed in
    both cases unless driver_params overrides it: "type" may select
    "single" or "internal", and "mirror" names an optional secondary
    to mirror writes to.
    """
    driver_params = self.sr.srcmd.params['driver_params']
    requested = driver_params.get("type")
    if requested == "single":
        snap_kind = self.SNAPSHOT_SINGLE
    elif requested == "internal":
        snap_kind = self.SNAPSHOT_INTERNAL
    else:
        # default (also for unrecognized values): full double snapshot
        snap_kind = self.SNAPSHOT_DOUBLE
    secondary = driver_params.get("mirror") or None
    return self._do_snapshot(sr_uuid, vdi_uuid, snap_kind,
            secondary=secondary)
def clone(self, sr_uuid, vdi_uuid):
    """Clone this VDI: a double snapshot with the cloneOp flag set so
    the new records are introduced as a clone rather than a snapshot."""
    return self._do_snapshot(sr_uuid, vdi_uuid, self.SNAPSHOT_DOUBLE,
            cloneOp=True)
def compose(self, sr_uuid, vdi1, vdi2):
    """Set vdi1 as the (hidden) VHD parent of this VDI (vdi2) and
    refresh the tapdisk so the new chain takes effect. VHD only."""
    util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1))
    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')
    parent_uuid = vdi1
    parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid
    assert(self.sr.lvmCache.checkLV(parent_lvname))
    parent_path = os.path.join(self.sr.path, parent_lvname)
    # both LVs must be active so vhd-util can rewrite the parent locator
    self.sr.lvActivator.activate(self.uuid, self.lvname, False)
    self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
    vhdutil.setParent(self.path, parent_path, False)
    # the parent is an internal chain node now, hide it from users
    vhdutil.setHidden(parent_path)
    if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
            True):
        raise util.SMException("failed to refresh VDI %s" % self.uuid)
    util.SMlog("Compose done")
def reset_leaf(self, sr_uuid, vdi_uuid):
    """Discard all data in this leaf VHD, reverting it to an empty
    delta over its parent. VHD only; refuses to run on a parentless
    VHD since that would destroy the entire contents."""
    util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid)
    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')
    self.sr.lvActivator.activate(self.uuid, self.lvname, False)
    # safety check
    if not vhdutil.hasParent(self.path):
        # BUGFIX: the original expression was
        #   "ERROR: VDI %s has no parent, " + "will not ..." % self.uuid
        # where % bound only to the second literal (which has no
        # placeholder), raising TypeError instead of SMException.
        raise util.SMException("ERROR: VDI %s has no parent, "
                "will not reset contents" % self.uuid)
    vhdutil.killData(self.path)
def _attach(self):
    """Activate the whole VHD chain (leaf with a persistent binary
    refcount) and return the standard attach params; also publishes
    synthetic SCSI identification data via xenstore."""
    self._chainSetActive(True, True, True)
    if not util.pathexists(self.path):
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='Could not find: %s' % self.path)
    if not hasattr(self,'xenstore_data'):
        self.xenstore_data = {}
    # advertise synthetic SCSI inquiry pages so the guest sees stable IDs
    self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
            scsiutil.gen_synthetic_page_data(self.uuid)))
    self.xenstore_data['storage-type']='lvm'
    self.xenstore_data['vdi-type']=self.vdi_type
    self.attached = True
    # persist activation refcounts so they survive this process
    self.sr.lvActivator.persist()
    return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
def _detach(self):
self._chainSetActive(False, True)
self.attached = False
def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, cloneOp=False, secondary=None):
    """Pause the tapdisk, run the snapshot, then unpause.

    On snapshot failure the unpause is still attempted (best-effort,
    errors logged and ignored) before re-raising the original error.
    """
    if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
        raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    snapResult = None
    try:
        snapResult = self._snapshot(snapType, cloneOp)
    except Exception, e1:
        try:
            blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, None)
        except Exception, e2:
            util.SMlog('WARNING: failed to clean up failed snapshot: '
                    '%s (error ignored)' % e2)
        raise e1
    # success: unpause with the optional mirror secondary in place
    blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
    return snapResult
def _snapshot(self, snapType, cloneOp = False):
    """Core snapshot implementation, protected by a clone journal.

    This VDI's LV is renamed to become the hidden, read-only "base
    copy"; one (single/internal) or two (double) VHD children are then
    created on top of it. If anything fails mid-way, _failClone() uses
    the journal to roll everything back. Master only, not available in
    legacy mode.
    """
    util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
    if not self.sr.isMaster:
        raise xs_errors.XenError('LVMMaster')
    if self.sr.legacyMode:
        raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
    self._loadThis()
    if self.hidden:
        raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
    self.sm_config = self.session.xenapi.VDI.get_sm_config( \
            self.sr.srcmd.params['vdi_ref'])
    # raw VDIs cannot normally be snapshotted (no VHD metadata); a test
    # fistpoint can override this for testing
    if self.sm_config.has_key("type") and self.sm_config['type']=='raw':
        if not util.fistpoint.is_active("testsm_clone_allow_raw"):
            raise xs_errors.XenError('Unimplemented', \
                    opterr='Raw VDI, snapshot or clone not permitted')
    # we must activate the entire VHD chain because the real parent could
    # theoretically be anywhere in the chain if all VHDs under it are empty
    self._chainSetActive(True, False)
    if not util.pathexists(self.path):
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='VDI unavailable: %s' % (self.path))
    if self.vdi_type == vhdutil.VDI_TYPE_VHD:
        depth = vhdutil.getDepth(self.path)
        if depth == -1:
            raise xs_errors.XenError('VDIUnavailable', \
                    opterr='failed to get VHD depth')
        elif depth >= vhdutil.MAX_CHAIN_SIZE:
            raise xs_errors.XenError('SnapshotChainTooLong')
    self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
            self.sr.srcmd.params['vdi_ref'])
    # candidate LV sizes for the children: fully provisioned vs. just
    # the overhead of an empty VHD (thin)
    fullpr = lvhdutil.calcSizeVHDLV(self.size)
    thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \
            vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
    lvSizeOrig = thinpr
    lvSizeClon = thinpr
    hostRefs = []
    if self.sr.cmd == "vdi_snapshot":
        hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
        if hostRefs:
            # the VDI is attached somewhere: its active leaf must stay
            # fully inflated
            lvSizeOrig = fullpr
    if not self.sr.thinpr:
        if not self.issnap:
            lvSizeOrig = fullpr
        if self.sr.cmd != "vdi_snapshot":
            lvSizeClon = fullpr
    if (snapType == self.SNAPSHOT_SINGLE or
            snapType == self.SNAPSHOT_INTERNAL):
        # no clone child in these modes
        lvSizeClon = 0
    # the space required must include 2 journal LVs: a clone journal and an
    # inflate journal (for the failure handling
    size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
    lvSizeBase = self.size
    if self.vdi_type == vhdutil.VDI_TYPE_VHD:
        lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT,
                vhdutil.getSizePhys(self.path))
        # the base copy will be deflated below, freeing this much space
        size_req -= (self.utilisation - lvSizeBase)
    self.sr._ensureSpaceAvailable(size_req)
    baseUuid = util.gen_uuid()
    origUuid = self.uuid
    clonUuid = ""
    if snapType == self.SNAPSHOT_DOUBLE:
        clonUuid = util.gen_uuid()
    jval = "%s_%s" % (baseUuid, clonUuid)
    # journal entry enables rollback if we crash mid-way
    self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
    util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal",self.sr.uuid)
    try:
        # self becomes the "base vdi"
        origOldLV = self.lvname
        baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid
        self.sr.lvmCache.rename(self.lvname, baseLV)
        self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
        RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
        self.uuid = baseUuid
        self.lvname = baseLV
        self.path = os.path.join(self.sr.path, baseLV)
        self.label = "base copy"
        self.read_only = True
        self.location = self.uuid
        self.managed = False
        # shrink the base copy to the minimum - we do it before creating
        # the snapshot volumes to avoid requiring double the space
        if self.vdi_type == vhdutil.VDI_TYPE_VHD:
            lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
            self.utilisation = lvSizeBase
        util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
        snapVDI = self._createSnap(origUuid, lvSizeOrig, False)
        util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
        snapVDI2 = None
        if snapType == self.SNAPSHOT_DOUBLE:
            snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True)
            util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
        # note: it is important to mark the parent hidden only AFTER the
        # new VHD children have been created, which are referencing it;
        # otherwise we would introduce a race with GC that could reclaim
        # the parent before we snapshot it
        if self.vdi_type == vhdutil.VDI_TYPE_RAW:
            self.sr.lvmCache.setHidden(self.lvname)
        else:
            vhdutil.setHidden(self.path)
        util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
        # set the base copy to ReadOnly
        self.sr.lvmCache.setReadonly(self.lvname, True)
        util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
        if hostRefs:
            # other hosts have the original attached: update their view
            # of the renamed/replaced LVs
            self.sr._updateSlavesOnClone(hostRefs, origOldLV,
                    snapVDI.lvname, self.uuid, self.lvname)
    except (util.SMException, XenAPI.Failure), e:
        util.logException("LVHDVDI._snapshot")
        self._failClone(origUuid, jval, str(e))
    util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal",self.sr.uuid)
    self.sr.journaler.remove(self.JRN_CLONE, origUuid)
    return self._finishSnapshot(snapVDI, snapVDI2, cloneOp, snapType)
def _createSnap(self, snapUuid, snapSizeLV, isNew):
    """Snapshot self and return the snapshot VDI object.

    Creates a new LV of snapSizeLV bytes, writes a VHD child of self
    into it and builds the corresponding VDI object, inheriting
    sm_config except for per-host/transient keys. isNew=True seeds the
    binary refcount for a brand-new leaf.
    """
    snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid
    snapPath = os.path.join(self.sr.path, snapLV)
    self.sr.lvmCache.create(snapLV, long(snapSizeLV))
    util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
    if isNew:
        RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
    self.sr.lvActivator.add(snapUuid, snapLV, False)
    parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW)
    vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB)
    snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid)
    snapVDI = VDI.VDI(self.sr, snapUuid)
    snapVDI.read_only = False
    snapVDI.location = snapUuid
    snapVDI.size = self.size
    snapVDI.utilisation = snapSizeLV
    snapVDI.sm_config = dict()
    # inherit sm_config, but not type/parent info or leaf-only keys
    for key, val in self.sm_config.iteritems():
        if key not in ["type", "vdi_type", "vhd-parent", "paused"] and \
                not key.startswith("host_"):
            snapVDI.sm_config[key] = val
    snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
    snapVDI.sm_config["vhd-parent"] = snapParent
    snapVDI.lvname = snapLV
    return snapVDI
def _finishSnapshot(self, snapVDI, snapVDI2, cloneOp = False, snapType=None):
    """Finalize a successful _snapshot(): introduce the new VDI records
    into XAPI and the on-disk SR metadata, fix up refcounts, and update
    the original VDI's record.

    snapVDI is the child that replaces the original leaf; snapVDI2 (may
    be None) is the second "clone" child. Returns the params of the VDI
    to hand back to the caller.
    """
    if snapType is not self.SNAPSHOT_INTERNAL:
        self.sr._updateStats(self.sr.uuid, self.size)
    basePresent = True
    # Verify parent locator field of both children and delete basePath if
    # unused
    snapParent = snapVDI.sm_config["vhd-parent"]
    snap2Parent = ""
    if snapVDI2:
        snap2Parent = snapVDI2.sm_config["vhd-parent"]
    if snapParent != self.uuid and \
            (not snapVDI2 or snap2Parent != self.uuid):
        util.SMlog("%s != %s != %s => deleting unused base %s" % \
                (snapParent, self.uuid, snap2Parent, self.lvname))
        self.sr.lvmCache.remove(self.lvname)
        self.sr.lvActivator.remove(self.uuid, False)
        basePresent = False
    else:
        # assign the _binary_ refcount of the original VDI to the new base
        # VDI (but as the normal refcount, since binary refcounts are only
        # for leaf nodes). The normal refcount of the child is not
        # transferred to to the base VDI because normal refcounts are
        # incremented and decremented individually, and not based on the
        # VHD chain (i.e., the child's normal refcount will be decremented
        # independently of its parent situation). Add 1 for this clone op.
        # Note that we do not need to do protect the refcount operations
        # below with per-VDI locking like we do in lvutil because at this
        # point we have exclusive access to the VDIs involved. Other SM
        # operations are serialized by the Agent or with the SR lock, and
        # any coalesce activations are serialized with the SR lock. (The
        # coalesce activates the coalesced VDI pair in the beginning, which
        # cannot affect the VDIs here because they cannot possibly be
        # involved in coalescing at this point, and at the relinkSkip step
        # that activates the children, which takes the SR lock.)
        ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid
        (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
        RefCounter.set(self.uuid, bcnt + 1, 0, ns)
    # the "paused" and "host_*" sm-config keys are special and must stay on
    # the leaf without being inherited by anyone else
    for key in filter(lambda x: x == "paused" or x.startswith("host_"),
            self.sm_config.keys()):
        snapVDI.sm_config[key] = self.sm_config[key]
        del self.sm_config[key]
    # Introduce any new VDI records & update the existing one
    # NOTE: 'type' deliberately mirrors the XAPI field name here, even
    # though it shadows the builtin within this method
    type = self.session.xenapi.VDI.get_type( \
            self.sr.srcmd.params['vdi_ref'])
    if snapVDI2:
        LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
        vdiRef = snapVDI2._db_introduce()
        if cloneOp:
            # a clone child is an ordinary (non-snapshot) VDI
            vdi_info = { UUID_TAG: snapVDI2.uuid,
                    NAME_LABEL_TAG: util.to_plain_string(\
                        self.session.xenapi.VDI.get_name_label( \
                        self.sr.srcmd.params['vdi_ref'])),
                    NAME_DESCRIPTION_TAG: util.to_plain_string(\
                        self.session.xenapi.VDI.get_name_description\
                        (self.sr.srcmd.params['vdi_ref'])),
                    IS_A_SNAPSHOT_TAG: 0,
                    SNAPSHOT_OF_TAG: '',
                    SNAPSHOT_TIME_TAG: '',
                    TYPE_TAG: type,
                    VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
                    READ_ONLY_TAG: 0,
                    MANAGED_TAG: int(snapVDI2.managed),
                    METADATA_OF_POOL_TAG: ''
            }
        else:
            util.SMlog("snapshot VDI params: %s" % \
                    self.session.xenapi.VDI.get_snapshot_time(vdiRef))
            # a snapshot child records what it is a snapshot of
            vdi_info = { UUID_TAG: snapVDI2.uuid,
                    NAME_LABEL_TAG: util.to_plain_string(\
                        self.session.xenapi.VDI.get_name_label( \
                        self.sr.srcmd.params['vdi_ref'])),
                    NAME_DESCRIPTION_TAG: util.to_plain_string(\
                        self.session.xenapi.VDI.get_name_description\
                        (self.sr.srcmd.params['vdi_ref'])),
                    IS_A_SNAPSHOT_TAG: 1,
                    SNAPSHOT_OF_TAG: snapVDI.uuid,
                    SNAPSHOT_TIME_TAG: '',
                    TYPE_TAG: type,
                    VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
                    READ_ONLY_TAG: 0,
                    MANAGED_TAG: int(snapVDI2.managed),
                    METADATA_OF_POOL_TAG: ''
            }
        LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
        util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
                (vdiRef, snapVDI2.uuid))
    if basePresent:
        LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
        vdiRef = self._db_introduce()
        # the base copy is hidden, read-only and unmanaged
        vdi_info = { UUID_TAG: self.uuid,
                NAME_LABEL_TAG: self.label,
                NAME_DESCRIPTION_TAG: self.description,
                IS_A_SNAPSHOT_TAG: 0,
                SNAPSHOT_OF_TAG: '',
                SNAPSHOT_TIME_TAG: '',
                TYPE_TAG: type,
                VDI_TYPE_TAG: self.sm_config['vdi_type'],
                READ_ONLY_TAG: 1,
                MANAGED_TAG: 0,
                METADATA_OF_POOL_TAG: ''
        }
        LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
        util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
                (vdiRef, self.uuid))
    # Update the original record
    vdi_ref = self.sr.srcmd.params['vdi_ref']
    self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
    self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
            str(snapVDI.utilisation))
    # Return the info on the new snap VDI
    snap = snapVDI2
    if not snap:
        snap = self
        if not basePresent:
            # a single-snapshot of an empty VDI will be a noop, resulting
            # in no new VDIs, so return the existing one. The GC wouldn't
            # normally try to single-snapshot an empty VHD of course, but
            # if an external snapshot operation manages to sneak in right
            # before a snapshot-coalesce phase, we would get here
            snap = snapVDI
    return snap.get_params()
def _initFromVDIInfo(self, vdiInfo):
self.vdi_type = vdiInfo.vdiType
self.lvname = vdiInfo.lvName
self.size = vdiInfo.sizeVirt
self.utilisation = vdiInfo.sizeLV
self.hidden = vdiInfo.hidden
if self.hidden:
self.managed = False
self.active = vdiInfo.lvActive
self.readonly = vdiInfo.lvReadonly
self.parent = vdiInfo.parentUuid
self.path = os.path.join(self.sr.path, self.lvname)
if hasattr(self, "sm_config_override"):
self.sm_config_override["vdi_type"] = self.vdi_type
else:
self.sm_config_override = {'vdi_type': self.vdi_type}
self.loaded = True
def _initFromLVInfo(self, lvInfo):
    """Populate this object from an LVM scan record.

    Without VHD metadata the LV size is used for both virtual size and
    utilisation, and the parent is unknown. Only a raw VDI is fully
    loaded at this point; a VHD VDI additionally needs its header read
    (see _initFromVHDInfo) before it is marked loaded.
    """
    info = lvInfo
    self.vdi_type = info.vdiType
    self.lvname = info.name
    self.size = info.size
    self.utilisation = info.size
    self.hidden = info.hidden
    self.active = info.active
    self.readonly = info.readonly
    self.parent = ''
    self.path = os.path.join(self.sr.path, self.lvname)
    # record the authoritative vdi_type in the sm_config override map
    if not hasattr(self, "sm_config_override"):
        self.sm_config_override = {}
    self.sm_config_override["vdi_type"] = self.vdi_type
    if self.vdi_type == vhdutil.VDI_TYPE_RAW:
        self.loaded = True
def _initFromVHDInfo(self, vhdInfo):
self.size = vhdInfo.sizeVirt
self.parent = vhdInfo.parentUuid
self.hidden = vhdInfo.hidden
self.loaded = True
def _determineType(self):
    """Determine whether this is a raw or a VHD VDI.

    Tries, in order: the sm_config of the XAPI record, a direct path
    check for each candidate LV name (cheap if the LV is active), and
    finally a full LVM listing. Returns True if the type was resolved
    (vdi_type/lvname/path are then set), False otherwise (e.g. the VG
    is not present yet during attach_from_config).
    """
    if self.sr.srcmd.params.has_key("vdi_ref"):
        vdi_ref = self.sr.srcmd.params["vdi_ref"]
        sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
        if sm_config.get("vdi_type"):
            self.vdi_type = sm_config["vdi_type"]
            prefix = lvhdutil.LV_PREFIX[self.vdi_type]
            self.lvname = "%s%s" % (prefix, self.uuid)
            self.path = os.path.join(self.sr.path, self.lvname)
            self.sm_config_override = sm_config
            return True
    # LVM commands can be costly, so check the file directly first in case
    # the LV is active
    found = False
    for t in lvhdutil.VDI_TYPES:
        lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid)
        path = os.path.join(self.sr.path, lvname)
        if util.pathexists(path):
            # both a raw and a VHD LV for the same uuid is corruption
            if found:
                raise xs_errors.XenError('VDILoad',
                        opterr="multiple VDI's: uuid %s" % self.uuid)
            found = True
            self.vdi_type = t
            self.lvname = lvname
            self.path = path
    if found:
        return True
    # now list all LV's
    if not lvutil._checkVG(self.sr.vgname):
        # when doing attach_from_config, the VG won't be there yet
        return False
    lvs = lvhdutil.getLVInfo(self.sr.lvmCache)
    if lvs.get(self.uuid):
        self._initFromLVInfo(lvs[self.uuid])
        return True
    return False
def _loadThis(self):
    """Load VDI info for this VDI and activate the LV if it's VHD. We
    don't do it in VDI.load() because not all VDI operations need it."""
    if self.loaded:
        if self.vdi_type == vhdutil.VDI_TYPE_VHD:
            # re-request activation so the refcount covers this operation
            self.sr.lvActivator.activate(self.uuid, self.lvname, False)
        return
    try:
        lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname)
    except util.CommandException, e:
        raise xs_errors.XenError('VDIUnavailable',
                opterr= '%s (LV scan error)' % os.strerror(abs(e.code)))
    if not lvs.get(self.uuid):
        raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
    self._initFromLVInfo(lvs[self.uuid])
    if self.vdi_type == vhdutil.VDI_TYPE_VHD:
        # reading the VHD header requires the LV to be active
        self.sr.lvActivator.activate(self.uuid, self.lvname, False)
        vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False)
        if not vhdInfo:
            raise xs_errors.XenError('VDIUnavailable', \
                    opterr='getVHDInfo failed')
        self._initFromVHDInfo(vhdInfo)
    self.loaded = True
def _chainSetActive(self, active, binary, persistent = False):
    """(De)activate the entire VHD chain of this VDI.

    The binary refcount applies only to the leaf (this VDI); parents
    always use the normal refcount. Deactivation is deferred: LVs are
    merely registered with the activator for the final cleanup step.
    """
    if binary:
        (count, bcount) = RefCounter.checkLocked(self.uuid,
                lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
        if (active and bcount > 0) or (not active and bcount == 0):
            return # this is a redundant activation/deactivation call
    vdiList = {self.uuid: self.lvname}
    if self.vdi_type == vhdutil.VDI_TYPE_VHD:
        vdiList = vhdutil.getParentChain(self.lvname,
                lvhdutil.extractUuid, self.sr.vgname)
    for uuid, lvName in vdiList.iteritems():
        binaryParam = binary
        if uuid != self.uuid:
            binaryParam = False # binary param only applies to leaf nodes
        if active:
            self.sr.lvActivator.activate(uuid, lvName, binaryParam,
                    persistent)
        else:
            # just add the LVs for deactivation in the final (cleanup)
            # step. The LVs must not have been activated during the current
            # operation
            self.sr.lvActivator.add(uuid, lvName, binaryParam)
def _failClone(self, uuid, jval, msg):
    """Roll back an interrupted clone using its journal entry, then
    raise VDIClone carrying the original error message."""
    try:
        self.sr._handleInterruptedCloneOp(uuid, jval, True)
        self.sr.journaler.remove(self.JRN_CLONE, uuid)
    except Exception, e:
        # rollback is best-effort: report the original failure regardless
        util.SMlog('WARNING: failed to clean up failed snapshot: ' \
                ' %s (error ignored)' % e)
    raise xs_errors.XenError('VDIClone', opterr=msg)
def _markHidden(self):
    """Flag this VDI as hidden: in the LVM metadata for a raw volume,
    in the VHD header otherwise; mirror the flag on this object."""
    if self.vdi_type == vhdutil.VDI_TYPE_RAW:
        self.sr.lvmCache.setHidden(self.lvname)
    else:
        vhdutil.setHidden(self.path)
    self.hidden = 1
def _prepareThin(self, attach):
    """Inflate (attach=True) or deflate (attach=False) the LV for thin
    provisioning.

    On the master this is done locally; on a slave the request is
    proxied to the master via the thin-provisioning XAPI plugin and the
    local LVM view is refreshed afterwards. Utilisation figures are
    pushed to XAPI if they changed.
    """
    origUtilisation = self.sr.lvmCache.getSize(self.lvname)
    if self.sr.isMaster:
        # the master can prepare the VDI locally
        if attach:
            lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid)
        else:
            lvhdutil.detachThin(self.session, self.sr.lvmCache,
                    self.sr.uuid, self.uuid)
    else:
        fn = "attach"
        if not attach:
            fn = "detach"
        pools = self.session.xenapi.pool.get_all()
        master = self.session.xenapi.pool.get_master(pools[0])
        rv = self.session.xenapi.host.call_plugin(
                master, self.sr.THIN_PLUGIN, fn,
                {"srUuid": self.sr.uuid, "vdiUuid": self.uuid})
        util.SMlog("call-plugin returned: %s" % rv)
        if not rv:
            raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
        # refresh to pick up the size change on this slave
        self.sr.lvmCache.activateNoRefcount(self.lvname, True)
    self.utilisation = self.sr.lvmCache.getSize(self.lvname)
    if origUtilisation != self.utilisation:
        vdi_ref = self.sr.srcmd.params['vdi_ref']
        self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
                str(self.utilisation))
        stats = lvutil._getVGstats(self.sr.vgname)
        sr_utilisation = stats['physical_utilisation']
        self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
                str(sr_utilisation))
def update(self, sr_uuid, vdi_uuid):
    """Synch this VDI's name_label, description, snapshot time and
    metadata-of-pool in the on-disk SR metadata with the values stored
    in XAPI. No-op in legacy mode (no metadata volume)."""
    if self.sr.legacyMode:
        return
    #Synch the name_label of this VDI on storage with the name_label in XAPI
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
    update_map = {}
    update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
            METADATA_OBJECT_TYPE_VDI
    update_map[UUID_TAG] = self.uuid
    update_map[NAME_LABEL_TAG] = util.to_plain_string(\
            self.session.xenapi.VDI.get_name_label(vdi_ref))
    update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string(\
            self.session.xenapi.VDI.get_name_description(vdi_ref))
    update_map[SNAPSHOT_TIME_TAG] = \
            self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
    update_map[METADATA_OF_POOL_TAG] = \
            self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
    LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
if __name__ == '__main__':
    # invoked directly by the SM backend: dispatch the command line
    SRCommand.run(LVHDSR, DRIVER_INFO)
else:
    # imported as a module: register the driver with the SR framework
    SR.registerSR(LVHDSR)
|
SonnyBrooks/ProjectEulerSwift | refs/heads/master | ProjectEulerSwift/Problem13.py | 1 | myArr = []
# Project Euler problem 13: one hundred 50-digit numbers. Python's
# arbitrary-precision integers let us add them directly; this prints
# the full sum (whose first ten digits are the puzzle answer).
myArr = [
    37107287533902102798797998220837590246510135740250,
    46376937677490009712648124896970078050417018260538,
    74324986199524741059474233309513058123726617309629,
    91942213363574161572522430563301811072406154908250,
    23067588207539346171171980310421047513778063246676,
    89261670696623633820136378418383684178734361726757,
    28112879812849979408065481931592621691275889832738,
    44274228917432520321923589422876796487670272189318,
    47451445736001306439091167216856844588711603153276,
    70386486105843025439939619828917593665686757934951,
    62176457141856560629502157223196586755079324193331,
    64906352462741904929101432445813822663347944758178,
    92575867718337217661963751590579239728245598838407,
    58203565325359399008402633568948830189458628227828,
    80181199384826282014278194139940567587151170094390,
    35398664372827112653829987240784473053190104293586,
    86515506006295864861532075273371959191420517255829,
    71693888707715466499115593487603532921714970056938,
    54370070576826684624621495650076471787294438377604,
    53282654108756828443191190634694037855217779295145,
    36123272525000296071075082563815656710885258350721,
    45876576172410976447339110607218265236877223636045,
    17423706905851860660448207621209813287860733969412,
    81142660418086830619328460811191061556940512689692,
    51934325451728388641918047049293215058642563049483,
    62467221648435076201727918039944693004732956340691,
    15732444386908125794514089057706229429197107928209,
    55037687525678773091862540744969844508330393682126,
    18336384825330154686196124348767681297534375946515,
    80386287592878490201521685554828717201219257766954,
    78182833757993103614740356856449095527097864797581,
    16726320100436897842553539920931837441497806860984,
    48403098129077791799088218795327364475675590848030,
    87086987551392711854517078544161852424320693150332,
    59959406895756536782107074926966537676326235447210,
    69793950679652694742597709739166693763042633987085,
    41052684708299085211399427365734116182760315001271,
    65378607361501080857009149939512557028198746004375,
    35829035317434717326932123578154982629742552737307,
    94953759765105305946966067683156574377167401875275,
    88902802571733229619176668713819931811048770190271,
    25267680276078003013678680992525463401061632866526,
    36270218540497705585629946580636237993140746255962,
    24074486908231174977792365466257246923322810917141,
    91430288197103288597806669760892938638285025333403,
    34413065578016127815921815005561868836468420090470,
    23053081172816430487623791969842487255036638784583,
    11487696932154902810424020138335124462181441773470,
    63783299490636259666498587618221225225512486764533,
    67720186971698544312419572409913959008952310058822,
    95548255300263520781532296796249481641953868218774,
    76085327132285723110424803456124867697064507995236,
    37774242535411291684276865538926205024910326572967,
    23701913275725675285653248258265463092207058596522,
    29798860272258331913126375147341994889534765745501,
    18495701454879288984856827726077713721403798879715,
    38298203783031473527721580348144513491373226651381,
    34829543829199918180278916522431027392251122869539,
    40957953066405232632538044100059654939159879593635,
    29746152185502371307642255121183693803580388584903,
    41698116222072977186158236678424689157993532961922,
    62467957194401269043877107275048102390895523597457,
    23189706772547915061505504953922979530901129967519,
    86188088225875314529584099251203829009407770775672,
    11306739708304724483816533873502340845647058077308,
    82959174767140363198008187129011875491310547126581,
    97623331044818386269515456334926366572897563400500,
    42846280183517070527831839425882145521227251250327,
    55121603546981200581762165212827652751691296897789,
    32238195734329339946437501907836945765883352399886,
    75506164965184775180738168837861091527357929701337,
    62177842752192623401942399639168044983993173312731,
    32924185707147349566916674687634660915035914677504,
    99518671430235219628894890102423325116913619626622,
    73267460800591547471830798392868535206946944540724,
    76841822524674417161514036427982273348055556214818,
    97142617910342598647204516893989422179826088076852,
    87783646182799346313767754307809363333018982642090,
    10848802521674670883215120185883543223812876952786,
    71329612474782464538636993009049310363619763878039,
    62184073572399794223406235393808339651327408011116,
    66627891981488087797941876876144230030984490851411,
    60661826293682836764744779239180335110989069790714,
    85786944089552990653640447425576083659976645795096,
    66024396409905389607120198219976047599490197230297,
    64913982680032973156037120041377903785566085089252,
    16730939319872750275468906903707539413042652315011,
    94809377245048795150954100921645863754710598436791,
    78639167021187492431995700641917969777599028300699,
    15368713711936614952811305876380278410754449733078,
    40789923115535562561142322423255033685442488917353,
    44889911501440648020369068063960672322193204149535,
    41503128880339536053299340368006977710650566631954,
    81234880673210146739058568557934581403627822703280,
    82616570773948327592232845941706525094512325230608,
    22918802058777319719839450180888072429661980811197,
    77158542502016545090413245809786882778948721859617,
    72107838435069186155435662884062257473692284509516,
    20849603980134001723930671666823555245252804609722,
    53503534226472524250874054075591789781264330331690,
]
myTot = sum(myArr)
print(myTot)
|
PlushBeaver/FanFicFare | refs/heads/master | fanficfare/adapters/adapter_ficwadcom.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2015 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
import time
import httplib, urllib
from .. import exceptions as exceptions
from ..htmlcleanup import stripHTML
from base_adapter import BaseSiteAdapter, makeDate
class FicwadComSiteAdapter(BaseSiteAdapter):
def __init__(self, config, url):
    """Set up the ficwad.com adapter: site abbreviation, story id
    (second component of the URL path) and placeholder credentials
    (real ones come from the config unless set explicitly)."""
    BaseSiteAdapter.__init__(self, config, url)
    self.story.setMetadata('siteabbrev','fw')
    # get storyId from url--url validation guarantees second part is storyId
    self.story.setMetadata('storyId',self.parsedUrl.path.split('/',)[2])
    self.username = "NoneGiven"
    self.password = ""
@staticmethod
def getSiteDomain():
    """The single domain this adapter handles."""
    return 'ficwad.com'
@classmethod
def getSiteExampleURLs(cls):
    """Example story URL shown to users for this site."""
    return "http://ficwad.com/story/1234"
def getSiteURLPattern(self):
    """Regexp accepting only the canonical story-URL form
    (http://ficwad.com/story/<number>) with nothing after the id."""
    base = re.escape(r"http://" + self.getSiteDomain())
    return base + r"/story/\d+?$"
def performLogin(self,url):
    """POST credentials to the site's login form.

    Credentials explicitly set on the adapter (self.password non-empty)
    win over the configured ones. Returns True on success; raises
    exceptions.FailedToLogin when the site reports a failed attempt.
    """
    params = {}
    if self.password:
        params['username'] = self.username
        params['password'] = self.password
    else:
        params['username'] = self.getConfig("username")
        params['password'] = self.getConfig("password")
    loginUrl = 'http://' + self.getSiteDomain() + '/account/login'
    logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
                                                         params['username']))
    d = self._postUrl(loginUrl,params,usecache=False)
    # the login page echoes this marker back when authentication fails
    if "Login attempt failed..." in d:
        logger.info("Failed to login to URL %s as %s" % (loginUrl,
                                                         params['username']))
        # (an unreachable 'return False' that followed this raise in the
        # original has been removed)
        raise exceptions.FailedToLogin(url,params['username'])
    else:
        return True
def use_pagecache(self):
'''
adapters that will work with the page cache need to implement
this and change it to True.
'''
return True
def extractChapterUrlsAndMetadata(self):
# fetch the chapter. From that we will get almost all the
# metadata and chapter list
url = self.url
logger.debug("URL: "+url)
# use BeautifulSoup HTML parser to make everything easier to find.
try:
data = self._fetchUrl(url)
# non-existent/removed story urls get thrown to the front page.
if "<h4>Featured Story</h4>" in data:
raise exceptions.StoryDoesNotExist(self.url)
soup = self.make_soup(data)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
# if blocked, attempt login.
if soup.find("div",{"class":"blocked"}) or soup.find("li",{"class":"blocked"}):
if self.performLogin(url): # performLogin raises
# FailedToLogin if it fails.
soup = self.make_soup(self._fetchUrl(url,usecache=False))
divstory = soup.find('div',id='story')
storya = divstory.find('a',href=re.compile("^/story/\d+$"))
if storya : # if there's a story link in the divstory header, this is a chapter page.
# normalize story URL on chapter list.
self.story.setMetadata('storyId',storya['href'].split('/',)[2])
url = "http://"+self.getSiteDomain()+storya['href']
logger.debug("Normalizing to URL: "+url)
self._setURL(url)
try:
soup = self.make_soup(self._fetchUrl(url))
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
# if blocked, attempt login.
if soup.find("div",{"class":"blocked"}) or soup.find("li",{"class":"blocked"}):
if self.performLogin(url): # performLogin raises
# FailedToLogin if it fails.
soup = self.make_soup(self._fetchUrl(url,usecache=False))
# title - first h4 tag will be title.
titleh4 = soup.find('div',{'class':'storylist'}).find('h4')
self.story.setMetadata('title', stripHTML(titleh4.a))
# Find authorid and URL from... author url.
a = soup.find('span',{'class':'author'}).find('a', href=re.compile(r"^/a/"))
self.story.setMetadata('authorId',a['href'].split('/')[2])
self.story.setMetadata('authorUrl','http://'+self.host+a['href'])
self.story.setMetadata('author',a.string)
# description
storydiv = soup.find("div",{"id":"story"})
self.setDescription(url,storydiv.find("blockquote",{'class':'summary'}).p)
#self.story.setMetadata('description', storydiv.find("blockquote",{'class':'summary'}).p.string)
# most of the meta data is here:
metap = storydiv.find("p",{"class":"meta"})
self.story.addToList('category',metap.find("a",href=re.compile(r"^/category/\d+")).string)
# warnings
# <span class="req"><a href="/help/38" title="Medium Spoilers">[!!] </a> <a href="/help/38" title="Rape/Sexual Violence">[R] </a> <a href="/help/38" title="Violence">[V] </a> <a href="/help/38" title="Child/Underage Sex">[Y] </a></span>
spanreq = metap.find("span",{"class":"story-warnings"})
if spanreq: # can be no warnings.
for a in spanreq.findAll("a"):
self.story.addToList('warnings',a['title'])
## perhaps not the most efficient way to parse this, using
## regexps for each rather than something more complex, but
## IMO, it's more readable and amenable to change.
metastr = stripHTML(unicode(metap)).replace('\n',' ').replace('\t',' ').replace(u'\u00a0',' ')
m = re.match(r".*?Rating: (.+?) -.*?",metastr)
if m:
self.story.setMetadata('rating', m.group(1))
m = re.match(r".*?Genres: (.+?) -.*?",metastr)
if m:
for g in m.group(1).split(','):
self.story.addToList('genre',g)
m = re.match(r".*?Characters: (.*?) -.*?",metastr)
if m:
for g in m.group(1).split(','):
if g:
self.story.addToList('characters',g)
m = re.match(r".*?Published: ([0-9-]+?) -.*?",metastr)
if m:
self.story.setMetadata('datePublished',makeDate(m.group(1), "%Y-%m-%d"))
# Updated can have more than one space after it. <shrug>
m = re.match(r".*?Updated: ([0-9-]+?) +-.*?",metastr)
if m:
self.story.setMetadata('dateUpdated',makeDate(m.group(1), "%Y-%m-%d"))
m = re.match(r".*? - ([0-9,]+?) words.*?",metastr)
if m:
self.story.setMetadata('numWords',m.group(1))
if metastr.endswith("Complete"):
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
# get the chapter list first this time because that's how we
# detect the need to login.
storylistul = soup.find('ul',{'class':'storylist'})
if not storylistul:
# no list found, so it's a one-chapter story.
self.chapterUrls.append((self.story.getMetadata('title'),url))
else:
chapterlistlis = storylistul.findAll('li')
for chapterli in chapterlistlis:
if "blocked" in chapterli['class']:
# paranoia check. We should already be logged in by now.
raise exceptions.FailedToLogin(url,self.username)
else:
#print "chapterli.h4.a (%s)"%chapterli.h4.a
self.chapterUrls.append((chapterli.h4.a.string,
u'http://%s%s'%(self.getSiteDomain(),
chapterli.h4.a['href'])))
#print "self.chapterUrls:%s"%self.chapterUrls
self.story.setMetadata('numChapters',len(self.chapterUrls))
return
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
soup = self.make_soup(self._fetchUrl(url))
span = soup.find('div', {'id' : 'storytext'})
if None == span:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,span)
def getClass():
    """Module entry point: return the adapter class for ficwad.com."""
    adapter_class = FicwadComSiteAdapter
    return adapter_class
|
pritamsamadder048/libforensics | refs/heads/master | code/lf/apps/msoffice/word/metadata.py | 13 | # Copyright 2009 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""
Metadata from Microsoft Word documents.
.. moduleauthor:: Michael Murr (mmurr@codeforensics.net)
"""
__docformat__ = "restructuredtext en"
__all__ = [
"WordMetadata"
]
from datetime import date
from lf.apps.msoffice.word.objects import Fib, SttbShortUnicode
class WordMetadata():
    """
    Represents metadata from a Microsoft Word document.

    .. attribute:: magic
        The magic number from the FIB.
    .. attribute:: version
        The file format version from the FIB.
    .. attribute:: lang_id
        The language identifier from the FIB.
    .. attribute:: encryption_key
        The encryption key from the FIB.
    .. attribute:: is_template
        True if the document is a template.
    .. attribute:: is_glossary
        True if the document is a glossary.
    .. attribute:: is_complex
        True if the document is in complex fast-saved format.
    .. attribute:: has_pictures
        True if the document has pictures.
    .. attribute:: is_encrypted
        True if the document is encrypted.
    .. attribute:: is_far_east_encoded
        True if the document is encoded for the far east.
    .. attribute:: created_environment
        The environment the document was created in.
    .. attribute:: saved_mac
        True if the document was last saved on a Mac.
    .. attribute:: magic_created_by
        The magic number of the application that created the document.
    .. attribute:: magic_revised_by
        The magic number of the application that last revised the document.
    .. attribute:: created_build_date
        The build date of the application that created the document.
    .. attribute:: revised_build_date
        The build date of the application that last revised the document.
    .. attribute:: last_saved_by
        A list of the last authors to save the document.
    .. attribute:: last_saved_locations
        A list of the last locations the document was saved to (correspond
        with last_saved_by)
    .. attribute:: associated_strings
        Associated strings.
    .. attribute:: users_roles
        A list of (user name, role) pairs for protected content.
    """

    def __init__(self, cfb):
        """
        Initializes a WordMetadata object.

        :parameters:
            cfb
                A CompoundFile object for the word document.
        """
        # Locate the "WordDocument" stream, which holds the FIB.
        # NOTE(review): if no entry is named "WordDocument", stream_id is
        # never bound and the Fib() call below raises NameError; assumes a
        # well-formed Word compound file -- confirm upstream validation.
        for entry in cfb.dir_entries.values():
            if entry.name == "WordDocument":
                stream_id = entry.sid
            # end if
        # end for
        fib = Fib(cfb.get_stream(stream_id))
        # The FIB header flag says which of the two table streams is active.
        if fib.header.whichTblStm:
            table_name = "1Table"
        else:
            table_name = "0Table"
        # end if
        for entry in cfb.dir_entries.values():
            if entry.name == table_name:
                stream_id = entry.sid
            # end if
        # end for
        table_stream = cfb.get_stream(stream_id, ignore_size=True)
        # Straight copies of FIB header fields.
        self.magic = fib.header.wIdent
        self.version = fib.header.nFib
        self.lang_id = fib.header.lid
        self.encryption_key = fib.header.lKey
        self.is_template = bool(fib.header.dot)
        self.is_glossary = bool(fib.header.glsy)
        self.is_complex = bool(fib.header.complex)
        self.has_pictures = bool(fib.header.hasPic)
        self.is_encrypted = bool(fib.header.encrypted)
        self.is_far_east_encoded = bool(fib.header.farEast)
        self.saved_mac = bool(fib.header.mac)
        self.created_environment = fib.header.envr
        self.magic_created_by = fib.shorts.wMagicCreated
        self.magic_revised_by = fib.shorts.wMagicRevised
        # Build dates are packed decimal MMDDYY (e.g. 120399 -> 1999-12-03);
        # the two-digit year is offset from 1900.
        created_date = fib.longs.lProductCreated
        year = (created_date % 100) + 1900
        day = (created_date // 100) % 100
        month = (created_date // 10000) % 100
        self.created_build_date = date(year, month, day)
        revised_date = fib.longs.lProductRevised
        year = (revised_date % 100) + 1900
        day = (revised_date // 100) % 100
        month = (revised_date // 10000) % 100
        self.revised_build_date = date(year, month, day)
        # sttbSavedBy interleaves author names and save locations.
        if fib.fc_lcb.sttbSavedBy.lcb:
            saved_by = SttbShortUnicode(
                table_stream, fib.fc_lcb.sttbSavedBy.fc
            )
            last_saved_by = list(saved_by.data[::2])
            last_saved_locations = list(saved_by.data[1::2])
        else:
            last_saved_by = list()
            last_saved_locations = list()
        # end if
        if fib.fc_lcb.sttbfAssoc.lcb:
            assoc = SttbShortUnicode(table_stream, fib.fc_lcb.sttbfAssoc.fc)
            associated_strings = assoc.data
        else:
            associated_strings = list()
        # end if
        # sttbProtUser only exists in newer FIB layouts, hence the hasattr.
        if hasattr(fib.fc_lcb, "sttbProtUser"):
            if fib.fc_lcb.sttbProtUser.lcb:
                prot_users = SttbShortUnicode(
                    table_stream, fib.fc_lcb.sttbProtUser.fc
                )
                users_roles = list(zip(prot_users.data, prot_users.extra_data))
            else:
                users_roles = list()
            # end if
        else:
            users_roles = list()
        # end if
        self.last_saved_by = last_saved_by
        self.last_saved_locations = last_saved_locations
        self.associated_strings = associated_strings
        self.users_roles = users_roles
    # end def __init__
# end class WordMetadata
|
CCI-MOC/python-novaclient | refs/heads/moc-modified | novaclient/shell.py | 1 | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Nova API.
"""
from __future__ import print_function
import argparse
import getpass
import glob
import imp
import itertools
import logging
import os
import pkgutil
import sys
import time
from keystoneclient.auth.identity.generic import password
from keystoneclient.auth.identity.generic import token
from keystoneclient.auth.identity import v3 as identity
from keystoneclient import session as ksession
from oslo.utils import encodeutils
from oslo.utils import strutils
import pkg_resources
import six
HAS_KEYRING = False
all_errors = ValueError
try:
import keyring
HAS_KEYRING = True
except ImportError:
pass
import novaclient
import novaclient.auth_plugin
from novaclient import client
from novaclient import exceptions as exc
import novaclient.extension
from novaclient.i18n import _
from novaclient.openstack.common import cliutils
from novaclient import utils
from novaclient.v2 import shell as shell_v2
DEFAULT_OS_COMPUTE_API_VERSION = "2"
DEFAULT_NOVA_ENDPOINT_TYPE = 'publicURL'
# NOTE(cyeoh): Having the service type dependent on the API version
# is pretty ugly, but we have to do this because traditionally the
# catalog entry for compute points directly to the V2 API rather than
# the root, and then doing version discovery.
DEFAULT_NOVA_SERVICE_TYPE_MAP = {'1.1': 'compute',
'2': 'compute',
'3': 'computev3'}
logger = logging.getLogger(__name__)
def positive_non_zero_float(text):
    """Argparse type callback: parse *text* as a float strictly > 0.

    Returns None when text is None (option absent); raises
    argparse.ArgumentTypeError for non-numeric or non-positive input.
    """
    if text is None:
        return None
    try:
        value = float(text)
    except ValueError:
        raise argparse.ArgumentTypeError(_("%s must be a float") % text)
    if value <= 0:
        raise argparse.ArgumentTypeError(
            _("%s must be greater than 0") % text)
    return value
class SecretsHelper(object):
    """Caches the auth token / management URL / tenant id triple in the OS
    keyring and prompts for a password when one was not supplied.

    All keyring access is a no-op unless the `keyring` package imported
    successfully (HAS_KEYRING) and --os-cache is enabled.
    """

    def __init__(self, args, client):
        self.args = args
        self.client = client
        # Lazily-built keyring key and memoized password.
        self.key = None
        self._password = None

    def _validate_string(self, text):
        # A usable secret is any non-empty string.
        if text is None or len(text) == 0:
            return False
        return True

    def _make_key(self):
        """Build (once) a keyring key that identifies this endpoint/user
        combination; None components are stored as '?'."""
        if self.key is not None:
            return self.key
        keys = [
            self.client.auth_url,
            self.client.projectid,
            self.client.user,
            self.client.region_name,
            self.client.endpoint_type,
            self.client.service_type,
            self.client.service_name,
            self.client.volume_service_name,
        ]
        for (index, key) in enumerate(keys):
            if key is None:
                keys[index] = '?'
            else:
                keys[index] = str(keys[index])
        self.key = "/".join(keys)
        return self.key

    def _prompt_password(self, verify=True):
        """Interactively read (and optionally confirm) a password.

        Returns None when stdin is not a tty or the user hits Ctrl-D.
        """
        pw = None
        if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty():
            # Check for Ctl-D
            try:
                while True:
                    pw1 = getpass.getpass('OS Password: ')
                    if verify:
                        pw2 = getpass.getpass('Please verify: ')
                    else:
                        pw2 = pw1
                    if pw1 == pw2 and self._validate_string(pw1):
                        pw = pw1
                        break
            except EOFError:
                pass
        return pw

    def save(self, auth_token, management_url, tenant_id):
        """Persist the auth triple to the keyring as 'token|url|tenant'."""
        if not HAS_KEYRING or not self.args.os_cache:
            return
        if (auth_token == self.auth_token and
                management_url == self.management_url):
            # Nothing changed....
            return
        if not all([management_url, auth_token, tenant_id]):
            raise ValueError(_("Unable to save empty management url/auth "
                               "token"))
        value = "|".join([str(auth_token),
                          str(management_url),
                          str(tenant_id)])
        keyring.set_password("novaclient_auth", self._make_key(), value)

    @property
    def password(self):
        # Cache password so we prompt user at most once
        if self._password:
            pass
        elif self._validate_string(self.args.os_password):
            self._password = self.args.os_password
        else:
            verify_pass = strutils.bool_from_string(
                cliutils.env("OS_VERIFY_PASSWORD", default=False), True)
            self._password = self._prompt_password(verify_pass)
        if not self._password:
            raise exc.CommandError(
                'Expecting a password provided via either '
                '--os-password, env[OS_PASSWORD], or '
                'prompted response')
        return self._password

    @property
    def management_url(self):
        """Cached management URL from the keyring, or None."""
        if not HAS_KEYRING or not self.args.os_cache:
            return None
        management_url = None
        try:
            block = keyring.get_password('novaclient_auth', self._make_key())
            if block:
                # Stored format: token|management_url|tenant_id
                _token, management_url, _tenant_id = block.split('|', 2)
        except all_errors:
            pass
        return management_url

    @property
    def auth_token(self):
        # Now is where it gets complicated since we
        # want to look into the keyring module, if it
        # exists and see if anything was provided in that
        # file that we can use.
        if not HAS_KEYRING or not self.args.os_cache:
            return None
        token = None
        try:
            block = keyring.get_password('novaclient_auth', self._make_key())
            if block:
                token, _management_url, _tenant_id = block.split('|', 2)
        except all_errors:
            pass
        return token

    @property
    def tenant_id(self):
        """Cached tenant id from the keyring, or None."""
        if not HAS_KEYRING or not self.args.os_cache:
            return None
        tenant_id = None
        try:
            block = keyring.get_password('novaclient_auth', self._make_key())
            if block:
                _token, _management_url, tenant_id = block.split('|', 2)
        except all_errors:
            pass
        return tenant_id
class NovaClientArgumentParser(argparse.ArgumentParser):
    """ArgumentParser with nova-specific error text and tolerant matching
    of hyphen/underscore option-name variants."""

    def __init__(self, *args, **kwargs):
        super(NovaClientArgumentParser, self).__init__(*args, **kwargs)

    def error(self, message):
        """error(message: string)

        Prints a usage message incorporating the message to stderr and
        exits.
        """
        self.print_usage(sys.stderr)
        # FIXME(lzyeval): if changes occur in argparse.ArgParser._check_value
        # Trim argparse's long "(choose from ...)" suffix from the message.
        trimmed_message = message.split(' (choose from')[0]
        prog_name, _unused, subcommand = self.prog.partition(' ')
        template = _("error: %(errmsg)s\nTry '%(mainp)s help %(subp)s'"
                     " for more information.\n")
        self.exit(2, template % {'errmsg': trimmed_message,
                                 'mainp': prog_name,
                                 'subp': subcommand})

    def _get_option_tuples(self, option_string):
        """returns (action, option, value) candidates for an option prefix

        Returns [first candidate] if all candidates refers to current and
        deprecated forms of the same options: "nova boot ... --key KEY"
        parsing succeed because --key could only match --key-name,
        --key_name which are current/deprecated forms of the same option.
        """
        candidates = (super(NovaClientArgumentParser, self)
                      ._get_option_tuples(option_string))
        if len(candidates) > 1:
            # Treat --foo-bar and --foo_bar as the same logical option.
            normalized = set(opt.replace('_', '-')
                             for _action, opt, _value in candidates)
            if len(normalized) == 1:
                return candidates[:1]
        return candidates
class OpenStackComputeShell(object):
times = []
    def _append_global_identity_args(self, parser):
        """Add keystone-session and identity arguments to *parser*,
        defaulting them from legacy OS_*/NOVA_* environment variables."""
        # Register the CLI arguments that have moved to the session object.
        ksession.Session.register_cli_options(parser)

        parser.set_defaults(insecure=cliutils.env('NOVACLIENT_INSECURE',
                                                  default=False))

        identity.Password.register_argparse_arguments(parser)

        # NOVA_* variants are historical fallbacks for the OS_* variables.
        parser.set_defaults(os_username=cliutils.env('OS_USERNAME',
                                                     'NOVA_USERNAME'))
        parser.set_defaults(os_password=cliutils.env('OS_PASSWORD',
                                                     'NOVA_PASSWORD'))
        parser.set_defaults(os_auth_url=cliutils.env('OS_AUTH_URL',
                                                     'NOVA_URL'))
    def get_base_parser(self):
        """Build the top-level parser carrying all global nova options.

        Underscore-separated option names are deprecated, suppressed
        aliases of their hyphenated forms.
        """
        parser = NovaClientArgumentParser(
            prog='nova',
            description=__doc__.strip(),
            epilog='See "nova help COMMAND" '
                   'for help on a specific command.',
            add_help=False,
            formatter_class=OpenStackHelpFormatter,
        )

        # Global arguments
        parser.add_argument(
            '-h', '--help',
            action='store_true',
            help=argparse.SUPPRESS,
        )

        parser.add_argument('--version',
                            action='version',
                            version=novaclient.__version__)

        parser.add_argument(
            '--debug',
            default=False,
            action='store_true',
            help=_("Print debugging output"))

        parser.add_argument(
            '--os-cache',
            default=strutils.bool_from_string(
                cliutils.env('OS_CACHE', default=False), True),
            action='store_true',
            help=_("Use the auth token cache. Defaults to False if "
                   "env[OS_CACHE] is not set."))

        parser.add_argument(
            '--timings',
            default=False,
            action='store_true',
            help=_("Print call timing info"))

        parser.add_argument(
            '--os-auth-token',
            default=cliutils.env('OS_AUTH_TOKEN'),
            help='Defaults to env[OS_AUTH_TOKEN]')

        parser.add_argument(
            '--os_username',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--os_password',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--os-tenant-name',
            metavar='<auth-tenant-name>',
            default=cliutils.env('OS_TENANT_NAME', 'NOVA_PROJECT_ID'),
            help=_('Defaults to env[OS_TENANT_NAME].'))

        parser.add_argument(
            '--os_tenant_name',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--os-tenant-id',
            metavar='<auth-tenant-id>',
            default=cliutils.env('OS_TENANT_ID'),
            help=_('Defaults to env[OS_TENANT_ID].'))

        parser.add_argument(
            '--os_auth_url',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--os-region-name',
            metavar='<region-name>',
            default=cliutils.env('OS_REGION_NAME', 'NOVA_REGION_NAME'),
            help=_('Defaults to env[OS_REGION_NAME].'))

        parser.add_argument(
            '--os_region_name',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--os-auth-system',
            metavar='<auth-system>',
            default=cliutils.env('OS_AUTH_SYSTEM'),
            help='Defaults to env[OS_AUTH_SYSTEM].')

        parser.add_argument(
            '--os_auth_system',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--service-type',
            metavar='<service-type>',
            help=_('Defaults to compute for most actions'))

        parser.add_argument(
            '--service_type',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--service-name',
            metavar='<service-name>',
            default=cliutils.env('NOVA_SERVICE_NAME'),
            help=_('Defaults to env[NOVA_SERVICE_NAME]'))

        parser.add_argument(
            '--service_name',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--volume-service-name',
            metavar='<volume-service-name>',
            default=cliutils.env('NOVA_VOLUME_SERVICE_NAME'),
            help=_('Defaults to env[NOVA_VOLUME_SERVICE_NAME]'))

        parser.add_argument(
            '--volume_service_name',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--os-endpoint-type',
            metavar='<endpoint-type>',
            dest='endpoint_type',
            default=cliutils.env(
                'NOVA_ENDPOINT_TYPE',
                default=cliutils.env(
                    'OS_ENDPOINT_TYPE',
                    default=DEFAULT_NOVA_ENDPOINT_TYPE)),
            help=_('Defaults to env[NOVA_ENDPOINT_TYPE], '
                   'env[OS_ENDPOINT_TYPE] or ') +
                 DEFAULT_NOVA_ENDPOINT_TYPE + '.')

        parser.add_argument(
            '--endpoint-type',
            help=argparse.SUPPRESS)
        # NOTE(dtroyer): We can't add --endpoint_type here due to argparse
        #                thinking usage-list --end is ambiguous; but it
        #                works fine with only --endpoint-type present
        #                Go figure.  I'm leaving this here for doc purposes.
        # parser.add_argument('--endpoint_type',
        #                     help=argparse.SUPPRESS)

        parser.add_argument(
            '--os-compute-api-version',
            metavar='<compute-api-ver>',
            default=cliutils.env('OS_COMPUTE_API_VERSION',
                                 default=DEFAULT_OS_COMPUTE_API_VERSION),
            help=_('Accepts 1.1 or 3, '
                   'defaults to env[OS_COMPUTE_API_VERSION].'))

        parser.add_argument(
            '--os_compute_api_version',
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--bypass-url',
            metavar='<bypass-url>',
            dest='bypass_url',
            default=cliutils.env('NOVACLIENT_BYPASS_URL'),
            help="Use this API endpoint instead of the Service Catalog. "
                 "Defaults to env[NOVACLIENT_BYPASS_URL]")

        parser.add_argument('--bypass_url',
                            help=argparse.SUPPRESS)

        # The auth-system-plugins might require some extra options
        novaclient.auth_plugin.load_auth_system_opts(parser)

        self._append_global_identity_args(parser)

        return parser
def get_subcommand_parser(self, version):
parser = self.get_base_parser()
self.subcommands = {}
subparsers = parser.add_subparsers(metavar='<subcommand>')
try:
actions_module = {
'1.1': shell_v2,
'2': shell_v2,
'3': shell_v2,
}[version]
except KeyError:
actions_module = shell_v2
self._find_actions(subparsers, actions_module)
self._find_actions(subparsers, self)
for extension in self.extensions:
self._find_actions(subparsers, extension.module)
self._add_bash_completion_subparser(subparsers)
return parser
def _discover_extensions(self, version):
extensions = []
for name, module in itertools.chain(
self._discover_via_python_path(),
self._discover_via_contrib_path(version),
self._discover_via_entry_points()):
extension = novaclient.extension.Extension(name, module)
extensions.append(extension)
return extensions
    def _discover_via_python_path(self):
        """Yield (name, module) for every *_python_novaclient_ext module
        importable from sys.path."""
        for (module_loader, name, _ispkg) in pkgutil.iter_modules():
            if name.endswith('_python_novaclient_ext'):
                if not hasattr(module_loader, 'load_module'):
                    # Python 2.6 compat: actually get an ImpImporter obj
                    module_loader = module_loader.find_module(name)
                module = module_loader.load_module(name)
                if hasattr(module, 'extension_name'):
                    # The module may publish itself under a friendlier name.
                    name = module.extension_name
                yield name, module
def _discover_via_contrib_path(self, version):
module_path = os.path.dirname(os.path.abspath(__file__))
version_str = "v%s" % version.replace('.', '_')
# NOTE(akurilin): v1.1, v2 and v3 have one implementation, so
# we should discover contrib modules in one place.
if version_str in ["v1_1", "v3"]:
version_str = "v2"
ext_path = os.path.join(module_path, version_str, 'contrib')
ext_glob = os.path.join(ext_path, "*.py")
for ext_path in glob.iglob(ext_glob):
name = os.path.basename(ext_path)[:-3]
if name == "__init__":
continue
module = imp.load_source(name, ext_path)
yield name, module
def _discover_via_entry_points(self):
for ep in pkg_resources.iter_entry_points('novaclient.extension'):
name = ep.name
module = ep.load()
yield name, module
def _add_bash_completion_subparser(self, subparsers):
subparser = subparsers.add_parser(
'bash_completion',
add_help=False,
formatter_class=OpenStackHelpFormatter
)
self.subcommands['bash_completion'] = subparser
subparser.set_defaults(func=self.do_bash_completion)
def _find_actions(self, subparsers, actions_module):
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
# I prefer to be hyphen-separated instead of underscores.
command = attr[3:].replace('_', '-')
callback = getattr(actions_module, attr)
desc = callback.__doc__ or ''
action_help = desc.strip()
arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser(
command,
help=action_help,
description=desc,
add_help=False,
formatter_class=OpenStackHelpFormatter)
subparser.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS,
)
self.subcommands[command] = subparser
for (args, kwargs) in arguments:
subparser.add_argument(*args, **kwargs)
subparser.set_defaults(func=callback)
def setup_debugging(self, debug):
if not debug:
return
streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
# Set up the root logger to debug so that the submodules can
# print debug messages
logging.basicConfig(level=logging.DEBUG,
format=streamformat)
    def _get_keystone_auth(self, session, auth_url, **kwargs):
        """Build a keystone auth plugin: token auth when an auth token was
        supplied, username/password auth otherwise.

        NOTE(review): `session` is accepted but not used here -- the auth
        object is independent of the session it is later attached to.
        """
        auth_token = kwargs.pop('auth_token', None)
        if auth_token:
            return token.Token(auth_url, auth_token, **kwargs)
        else:
            # Pop the user-identity kwargs explicitly; the remainder
            # (project_* etc.) pass through untouched.
            return password.Password(
                auth_url,
                username=kwargs.pop('username'),
                user_id=kwargs.pop('user_id'),
                password=kwargs.pop('password'),
                user_domain_id=kwargs.pop('user_domain_id'),
                user_domain_name=kwargs.pop('user_domain_name'),
                **kwargs)
    def main(self, argv):
        """Entry point: parse argv, authenticate, dispatch the subcommand."""
        # Parse args once to find version and debug settings
        parser = self.get_base_parser()
        (options, args) = parser.parse_known_args(argv)
        self.setup_debugging(options.debug)

        # Discover available auth plugins
        novaclient.auth_plugin.discover_auth_systems()

        # build available subcommands based on version
        self.extensions = self._discover_extensions(
            options.os_compute_api_version)
        self._run_extension_hooks('__pre_parse_args__')

        # NOTE(dtroyer): Hackery to handle --endpoint_type due to argparse
        #                thinking usage-list --end is ambiguous; but it
        #                works fine with only --endpoint-type present
        #                Go figure.
        if '--endpoint_type' in argv:
            spot = argv.index('--endpoint_type')
            argv[spot] = '--endpoint-type'

        subcommand_parser = self.get_subcommand_parser(
            options.os_compute_api_version)
        self.parser = subcommand_parser

        if options.help or not argv:
            subcommand_parser.print_help()
            return 0

        args = subcommand_parser.parse_args(argv)
        self._run_extension_hooks('__post_parse_args__', args)

        # Short-circuit and deal with help right away.
        if args.func == self.do_help:
            self.do_help(args)
            return 0
        elif args.func == self.do_bash_completion:
            self.do_bash_completion(args)
            return 0

        # Snapshot the identity-related options into locals.
        os_username = args.os_username
        os_user_id = args.os_user_id
        os_password = None  # Fetched and set later as needed
        os_tenant_name = args.os_tenant_name
        os_tenant_id = args.os_tenant_id
        os_auth_url = args.os_auth_url
        os_region_name = args.os_region_name
        os_auth_system = args.os_auth_system
        endpoint_type = args.endpoint_type
        insecure = args.insecure
        service_type = args.service_type
        service_name = args.service_name
        volume_service_name = args.volume_service_name
        bypass_url = args.bypass_url
        os_cache = args.os_cache
        cacert = args.os_cacert
        timeout = args.timeout

        keystone_session = None
        keystone_auth = None

        # We may have either, both or none of these.
        # If we have both, we don't need USERNAME, PASSWORD etc.
        # Fill in the blanks from the SecretsHelper if possible.
        # Finally, authenticate unless we have both.
        # Note if we don't auth we probably don't have a tenant ID so we can't
        # cache the token.
        auth_token = args.os_auth_token if args.os_auth_token else None
        management_url = bypass_url if bypass_url else None

        if os_auth_system and os_auth_system != "keystone":
            auth_plugin = novaclient.auth_plugin.load_plugin(os_auth_system)
        else:
            auth_plugin = None

        if not endpoint_type:
            endpoint_type = DEFAULT_NOVA_ENDPOINT_TYPE

        # This allow users to use endpoint_type as (internal, public or admin)
        # just like other openstack clients (glance, cinder etc)
        if endpoint_type in ['internal', 'public', 'admin']:
            endpoint_type += 'URL'

        if not service_type:
            # Map the API version to a catalog service type, falling back
            # to the default version's mapping on unknown versions.
            os_compute_api_version = (options.os_compute_api_version or
                                      DEFAULT_OS_COMPUTE_API_VERSION)
            try:
                service_type = DEFAULT_NOVA_SERVICE_TYPE_MAP[
                    os_compute_api_version]
            except KeyError:
                service_type = DEFAULT_NOVA_SERVICE_TYPE_MAP[
                    DEFAULT_OS_COMPUTE_API_VERSION]

        service_type = cliutils.get_service_type(args.func) or service_type

        # If we have an auth token but no management_url, we must auth anyway.
        # Expired tokens are handled by client.py:_cs_request
        must_auth = not (cliutils.isunauthenticated(args.func)
                         or (auth_token and management_url))

        # Do not use Keystone session for cases with no session support. The
        # presence of auth_plugin means os_auth_system is present and is not
        # keystone.
        use_session = True
        if auth_plugin or bypass_url or os_cache or volume_service_name:
            use_session = False

        # FIXME(usrleon): Here should be restrict for project id same as
        # for os_username or os_password but for compatibility it is not.
        if must_auth:
            if auth_plugin:
                auth_plugin.parse_opts(args)

            if not auth_plugin or not auth_plugin.opts:
                if not os_username and not os_user_id:
                    raise exc.CommandError(
                        _("You must provide a username "
                          "or user id via --os-username, --os-user-id, "
                          "env[OS_USERNAME] or env[OS_USER_ID]"))

            if not any([args.os_tenant_name, args.os_tenant_id,
                        args.os_project_id, args.os_project_name]):
                raise exc.CommandError(_("You must provide a project name or"
                                         " project id via --os-project-name,"
                                         " --os-project-id, env[OS_PROJECT_ID]"
                                         " or env[OS_PROJECT_NAME]. You may"
                                         " use os-project and os-tenant"
                                         " interchangeably."))

            if not os_auth_url:
                if os_auth_system and os_auth_system != 'keystone':
                    # Non-keystone auth systems may supply a default URL.
                    os_auth_url = auth_plugin.get_auth_url()

            if not os_auth_url:
                raise exc.CommandError(
                    _("You must provide an auth url "
                      "via either --os-auth-url or env[OS_AUTH_URL] "
                      "or specify an auth_system which defines a "
                      "default url with --os-auth-system "
                      "or env[OS_AUTH_SYSTEM]"))

        # --os-project-* wins over the legacy --os-tenant-* spellings.
        project_id = args.os_project_id or args.os_tenant_id
        project_name = args.os_project_name or args.os_tenant_name

        if use_session:
            # Not using Nova auth plugin, so use keystone
            start_time = time.time()
            keystone_session = ksession.Session.load_from_cli_options(args)
            keystone_auth = self._get_keystone_auth(
                keystone_session,
                args.os_auth_url,
                username=args.os_username,
                user_id=args.os_user_id,
                user_domain_id=args.os_user_domain_id,
                user_domain_name=args.os_user_domain_name,
                password=args.os_password,
                auth_token=args.os_auth_token,
                project_id=project_id,
                project_name=project_name,
                project_domain_id=args.os_project_domain_id,
                project_domain_name=args.os_project_domain_name)
            end_time = time.time()
            self.times.append(
                ('%s %s' % ('auth_url', args.os_auth_url),
                 start_time, end_time))

        if (options.os_compute_api_version and
                options.os_compute_api_version != '1.0'):
            if not any([args.os_tenant_id, args.os_tenant_name,
                        args.os_project_id, args.os_project_name]):
                raise exc.CommandError(_("You must provide a project name or"
                                         " project id via --os-project-name,"
                                         " --os-project-id, env[OS_PROJECT_ID]"
                                         " or env[OS_PROJECT_NAME]. You may"
                                         " use os-project and os-tenant"
                                         " interchangeably."))

            if not os_auth_url:
                raise exc.CommandError(
                    _("You must provide an auth url "
                      "via either --os-auth-url or env[OS_AUTH_URL]"))

        self.cs = client.Client(
            options.os_compute_api_version,
            os_username, os_password, os_tenant_name,
            tenant_id=os_tenant_id, user_id=os_user_id,
            auth_url=os_auth_url, insecure=insecure,
            region_name=os_region_name, endpoint_type=endpoint_type,
            extensions=self.extensions, service_type=service_type,
            service_name=service_name, auth_system=os_auth_system,
            auth_plugin=auth_plugin, auth_token=auth_token,
            volume_service_name=volume_service_name,
            timings=args.timings, bypass_url=bypass_url,
            os_cache=os_cache, http_log_debug=options.debug,
            cacert=cacert, timeout=timeout,
            session=keystone_session, auth=keystone_auth)

        # Now check for the password/token of which pieces of the
        # identifying keyring key can come from the underlying client
        if must_auth:
            helper = SecretsHelper(args, self.cs.client)
            if (auth_plugin and auth_plugin.opts and
                    "os_password" not in auth_plugin.opts):
                use_pw = False
            else:
                use_pw = True

            tenant_id = helper.tenant_id
            # Allow commandline to override cache
            if not auth_token:
                auth_token = helper.auth_token
            if not management_url:
                management_url = helper.management_url
            if tenant_id and auth_token and management_url:
                self.cs.client.tenant_id = tenant_id
                self.cs.client.auth_token = auth_token
                self.cs.client.management_url = management_url
                self.cs.client.password_func = lambda: helper.password
            elif use_pw:
                # We're missing something, so auth with user/pass and save
                # the result in our helper.
                self.cs.client.password = helper.password
                self.cs.client.keyring_saver = helper

        try:
            # This does a couple of bits which are useful even if we've
            # got the token + service URL already. It exits fast in that case.
            if not cliutils.isunauthenticated(args.func):
                if not use_session:
                    # Only call authenticate() if Nova auth plugin is used.
                    # If keystone is used, authentication is handled as part
                    # of session.
                    self.cs.authenticate()
        except exc.Unauthorized:
            raise exc.CommandError(_("Invalid OpenStack Nova credentials."))
        except exc.AuthorizationFailure:
            raise exc.CommandError(_("Unable to authorize user"))

        if options.os_compute_api_version == "3" and service_type != 'image':
            # NOTE(cyeoh): create an image based client because the
            # images api is no longer proxied by the V3 API and we
            # sometimes need to be able to look up images information
            # via glance when connected to the nova api.
            image_service_type = 'image'
            # NOTE(hdd): the password is needed again because creating a new
            # Client without specifying bypass_url will force authentication.
            # We can't reuse self.cs's bypass_url, because that's the URL for
            # the nova service; we need to get glance's URL for this Client
            # NOTE(review): 'helper' is only bound when must_auth was True;
            # this path assumes v3 requests always authenticated -- confirm.
            if not os_password:
                os_password = helper.password
            self.cs.image_cs = client.Client(
                options.os_compute_api_version, os_username,
                os_password, os_tenant_name, tenant_id=os_tenant_id,
                auth_url=os_auth_url, insecure=insecure,
                region_name=os_region_name, endpoint_type=endpoint_type,
                extensions=self.extensions, service_type=image_service_type,
                service_name=service_name, auth_system=os_auth_system,
                auth_plugin=auth_plugin,
                volume_service_name=volume_service_name,
                timings=args.timings, bypass_url=bypass_url,
                os_cache=os_cache, http_log_debug=options.debug,
                session=keystone_session, auth=keystone_auth,
                cacert=cacert, timeout=timeout)

        args.func(self.cs, args)

        if args.timings:
            self._dump_timings(self.times + self.cs.get_timings())
def _dump_timings(self, timings):
class Tyme(object):
def __init__(self, url, seconds):
self.url = url
self.seconds = seconds
results = [Tyme(url, end - start) for url, start, end in timings]
total = 0.0
for tyme in results:
total += tyme.seconds
results.append(Tyme("Total", total))
utils.print_list(results, ["url", "seconds"], sortby_index=None)
def _run_extension_hooks(self, hook_type, *args, **kwargs):
"""Run hooks for all registered extensions."""
for extension in self.extensions:
extension.run_hooks(hook_type, *args, **kwargs)
def do_bash_completion(self, _args):
"""
Prints all of the commands and options to stdout so that the
nova.bash_completion script doesn't have to hard code them.
"""
commands = set()
options = set()
for sc_str, sc in self.subcommands.items():
commands.add(sc_str)
for option in sc._optionals._option_string_actions.keys():
options.add(option)
commands.remove('bash-completion')
commands.remove('bash_completion')
print(' '.join(commands | options))
@cliutils.arg(
'command',
metavar='<subcommand>',
nargs='?',
help='Display help for <subcommand>')
def do_help(self, args):
"""
Display help about this program or one of its subcommands.
"""
if args.command:
if args.command in self.subcommands:
self.subcommands[args.command].print_help()
else:
raise exc.CommandError(_("'%s' is not a valid subcommand") %
args.command)
else:
self.parser.print_help()
# I'm picky about my shell help.
class OpenStackHelpFormatter(argparse.HelpFormatter):
    """argparse help formatter that capitalizes section headings."""

    def __init__(self, prog, indent_increment=2, max_help_position=32,
                 width=None):
        # Same defaults as the stock formatter except for a wider
        # max_help_position (32 instead of 24).
        super(OpenStackHelpFormatter, self).__init__(
            prog, indent_increment, max_help_position, width)

    def start_section(self, heading):
        # Upper-case only the first character, leaving the rest untouched.
        capitalized = heading[0].upper() + heading[1:]
        super(OpenStackHelpFormatter, self).start_section(capitalized)
def main():
    """Entry point: decode argv, run the shell, map failures to exit codes."""
    try:
        arguments = [encodeutils.safe_decode(arg) for arg in sys.argv[1:]]
        OpenStackComputeShell().main(arguments)
    except KeyboardInterrupt:
        # Conventional 128 + SIGINT(2) exit status.
        print("... terminating nova client", file=sys.stderr)
        sys.exit(130)
    except Exception as e:
        logger.debug(e, exc_info=1)
        details = {'name': encodeutils.safe_encode(e.__class__.__name__),
                   'msg': encodeutils.safe_encode(six.text_type(e))}
        print("ERROR (%(name)s): %(msg)s" % details,
              file=sys.stderr)
        sys.exit(1)
if __name__ == "__main__":
main()
|
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py | 10 | from __future__ import division, print_function, absolute_import
from scipy import *
from scipy.sparse.linalg import lobpcg
from symeig import symeig
from pylab import plot, show, legend, xlabel, ylabel
set_printoptions(precision=3,linewidth=90)
import time
def test(n):
    # Build the n-by-n "Mikota pair" (A, B) used as a standard generalized
    # eigenproblem benchmark: A is tridiagonal with main diagonal
    # 2n-1, 2n-3, ..., 1 and off-diagonals -(n-1), ..., -1; B is diagonal
    # with entries 1/1, 1/2, ..., 1/n.  Relies on arange/diag from the
    # module's star import.
    x = arange(1,n+1)
    B = diag(1./x)
    y = arange(n-1,0,-1)
    z = arange(2*n-1,0,-2)
    A = diag(z)-diag(y,-1)-diag(y,1)
    return A,B
def as2d(ar):
    """Return `ar` as a 2-d array, viewing 1-d input as an (n, 1) column.

    Bug fix: the original referenced an undefined module alias ``nm`` (a
    stale ``numpy`` abbreviation), which raised NameError on any 1-d input.
    ``array`` comes from the module-level star import.
    """
    if ar.ndim == 2:
        return ar
    # Assume 1-d input; reshape in place without copying the data.
    aux = array(ar, copy=False)
    aux.shape = (ar.shape[0], 1)
    return aux
def precond(x):
    # LOBPCG preconditioner: applies A^{-1} via the Cholesky factorization
    # stored in the module-level globals ``LorU``/``lower``, which are set
    # inside the benchmark loop below.  NOTE(review): this relies on the
    # loop assigning those globals before LOBPCG first calls us — confirm
    # ordering if refactoring.
    y = linalg.cho_solve((LorU, lower),x)
    return as2d(y)
m = 10 # Blocksize
N = array(([128,256,512,1024,2048])) # Increasing matrix size
data1 = []
data2 = []
# Time LOBPCG (with a Cholesky-based preconditioner) against symeig on
# Mikota pairs of growing size, then plot both timing curves.
# NOTE(review): time.clock() was removed in Python 3.8 and symeig is an
# external package; this benchmark predates both and needs porting
# (time.perf_counter()) to run on a modern interpreter.
for n in N:
    print('******', n)
    A,B = test(n) # Mikota pair
    X = rand(n,m)
    X = linalg.orth(X)
    tt = time.clock()
    # Globals LorU/lower are consumed by precond() above.
    (LorU, lower) = linalg.cho_factor(A, lower=0, overwrite_a=0)
    eigs,vecs = lobpcg.lobpcg(X,A,B,operatorT=precond,
                              residualTolerance=1e-4, maxIterations=40)
    data1.append(time.clock()-tt)
    eigs = sort(eigs)
    print()
    print('Results by LOBPCG')
    print()
    print(n,eigs)
    tt = time.clock()
    # Reference: dense generalized symmetric eigensolver (m smallest).
    w,v = symeig(A,B,range=(1,m))
    data2.append(time.clock()-tt)
    print()
    print('Results by symeig')
    print()
    print(n, w)
xlabel(r'Size $n$')
ylabel(r'Elapsed time $t$')
plot(N,data1,label='LOBPCG')
plot(N,data2,label='SYMEIG')
legend()
show()
|
grangier/django-11599 | refs/heads/master | tests/regressiontests/admin_views/customadmin.py | 3 | """
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from django.conf.urls.defaults import patterns
from django.contrib import admin
from django.http import HttpResponse
import models
class Admin2(admin.AdminSite):
    """A second, custom AdminSite -- exercised by tests.CustomAdminSiteTests."""
    login_template = 'custom_admin/login.html'
    index_template = 'custom_admin/index.html'

    def index(self, request, extra_context=None):
        # Custom index view: always injects a fixed extra-context payload.
        return super(Admin2, self).index(request, {'foo': '*bar*'})

    def get_urls(self):
        # Prepend one extra admin-protected URL to the stock URL set.
        extra = patterns('',
            (r'^my_view/$', self.admin_view(self.my_view)),
        )
        return extra + super(Admin2, self).get_urls()

    def my_view(self, request):
        return HttpResponse("Django is a magical pony!")
# Instantiate the second admin site and register the same models as the
# default site so the custom templates/views can be exercised in tests.
site = Admin2(name="admin2")
site.register(models.Article, models.ArticleAdmin)
site.register(models.Section, inlines=[models.ArticleInline])
site.register(models.Thing, models.ThingAdmin)
site.register(models.Fabric, models.FabricAdmin)
|
ntonjeta/iidea-Docker | refs/heads/master | examples/sobel/src/boost_1_63_0/tools/build/test/library_property.py | 44 | #!/usr/bin/python
# Copyright 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that the <library> property has no effect on "obj" targets. Previously,
# it affected all targets, so
#
# project : requirements <library>foo ;
# exe a : a.cpp helper ;
# obj helper : helper.cpp : <optimization>off ;
#
# caused 'foo' to be built with and without optimization.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# Root project imposes <library>lib//x on every target, while the "obj"
# target "foo" carries its own <variant>release requirement.  The library
# must still be built only for the project's primary (debug) variant, not
# once per obj-specific variant.
t.write("jamroot.jam", """
project : requirements <library>lib//x ;
exe a : a.cpp foo ;
obj foo : foo.cpp : <variant>release ;
""")
t.write("a.cpp", """
void aux();
int main() { aux(); }
""")
t.write("foo.cpp", """
void gee();
void aux() { gee(); }
""")
t.write("lib/x.cpp", """
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
gee() {}
""")
t.write("lib/jamfile.jam", """
lib x : x.cpp ;
""")
t.write("lib/jamroot.jam", """
""")
t.run_build_system()
# The exe must be produced; a release-variant object for the library must
# NOT exist — its presence would mean <library> leaked onto the obj target.
t.expect_addition("bin/$toolset/debug/a.exe")
t.expect_nothing("lib/bin/$toolset/release/x.obj")
t.cleanup()
|
hsuchie4/TACTIC | refs/heads/master | src/pyasm/application/maya/__init__.py | 8 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from maya_builder_exec import *
from maya_builder import *
from maya_environment import *
from maya_introspect import *
from maya_parser import *
from maya_app import *
from maya_anim_file import *
from maya_impl import *
|
upliftaero/MissionPlanner | refs/heads/master | Lib/site-packages/scipy/fftpack/basic.py | 56 | """
Discrete Fourier Transforms - basic.py
"""
# Created by Pearu Peterson, August,September 2002
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2']
from numpy import zeros, swapaxes
import numpy
import _fftpack
import atexit
atexit.register(_fftpack.destroy_zfft_cache)
atexit.register(_fftpack.destroy_zfftnd_cache)
atexit.register(_fftpack.destroy_drfft_cache)
atexit.register(_fftpack.destroy_cfft_cache)
atexit.register(_fftpack.destroy_cfftnd_cache)
atexit.register(_fftpack.destroy_rfft_cache)
del atexit
def istype(arr, typeclass):
    """Return True when ``arr``'s dtype scalar type derives from ``typeclass``."""
    dtype_cls = arr.dtype.type
    return issubclass(dtype_cls, typeclass)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
# XXX: single precision FFTs partially disabled due to accuracy issues
# for large prime-sized inputs.
#
# See http://permalink.gmane.org/gmane.comp.python.scientific.devel/13834
# ("fftpack test failures for 0.8.0b1", Ralf Gommers, 17 Jun 2010,
# @ scipy-dev)
#
# These should be re-enabled once the problems are resolved
def _is_safe_size(n):
"""
Is the size of FFT such that FFTPACK can handle it in single precision
with sufficient accuracy?
Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those
"""
n = int(n)
for c in (2, 3, 5):
while n % c == 0:
n /= c
return (n <= 1)
def _fake_crfft(x, n, *a, **kw):
    # Single-precision real-input complex FFT; for sizes FFTPACK gets wrong
    # in single precision, compute in double and downcast afterwards.
    if not _is_safe_size(n):
        return _fftpack.zrfft(x, n, *a, **kw).astype(numpy.complex64)
    return _fftpack.crfft(x, n, *a, **kw)
def _fake_cfft(x, n, *a, **kw):
    # Single-precision complex FFT; unsafe sizes fall back to the double
    # routine and are downcast afterwards.
    if not _is_safe_size(n):
        return _fftpack.zfft(x, n, *a, **kw).astype(numpy.complex64)
    return _fftpack.cfft(x, n, *a, **kw)
def _fake_rfft(x, n, *a, **kw):
    # Single-precision real FFT; unsafe sizes fall back to the double
    # routine and are downcast afterwards.
    if not _is_safe_size(n):
        return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32)
    return _fftpack.rfft(x, n, *a, **kw)
def _fake_cfftnd(x, shape, *a, **kw):
    # N-d single-precision complex FFT: every axis length must be safe,
    # otherwise compute in double precision and downcast.
    if not numpy.all(map(_is_safe_size, shape)):
        return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64)
    return _fftpack.cfftnd(x, shape, *a, **kw)
# Dispatch tables mapping input dtype -> FFTPACK work function.
# Single-precision entries route through the _fake_* wrappers above, which
# fall back to double precision for sizes FFTPACK mishandles in single
# (see the accuracy note earlier in this file); the direct bindings are
# kept commented out for when that issue is resolved.
_DTYPE_TO_FFT = {
#        numpy.dtype(numpy.float32): _fftpack.crfft,
        numpy.dtype(numpy.float32): _fake_crfft,
        numpy.dtype(numpy.float64): _fftpack.zrfft,
#        numpy.dtype(numpy.complex64): _fftpack.cfft,
        numpy.dtype(numpy.complex64): _fake_cfft,
        numpy.dtype(numpy.complex128): _fftpack.zfft,
}
_DTYPE_TO_RFFT = {
#        numpy.dtype(numpy.float32): _fftpack.rfft,
        numpy.dtype(numpy.float32): _fake_rfft,
        numpy.dtype(numpy.float64): _fftpack.drfft,
}
_DTYPE_TO_FFTN = {
#        numpy.dtype(numpy.complex64): _fftpack.cfftnd,
        numpy.dtype(numpy.complex64): _fake_cfftnd,
        numpy.dtype(numpy.complex128): _fftpack.zfftnd,
#        numpy.dtype(numpy.float32): _fftpack.cfftnd,
        numpy.dtype(numpy.float32): _fake_cfftnd,
        numpy.dtype(numpy.float64): _fftpack.zfftnd,
}
def _asfarray(x):
"""Like numpy asfarray, except that it does not modify x dtype if x is
already an array with a float dtype, and do not cast complex types to
real."""
if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]:
return x
else:
# We cannot use asfarray directly because it converts sequences of
# complex to sequence of real
ret = numpy.asarray(x)
if not ret.dtype.char in numpy.typecodes["AllFloat"]:
return numpy.asfarray(x)
return ret
def _fix_shape(x, n, axis):
""" Internal auxiliary function for _raw_fft, _raw_fftnd."""
s = list(x.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
x = x[index]
return x, False
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s,x.dtype.char)
z[index] = x
return z, True
def _raw_fft(x, n, axis, direction, overwrite_x, work_function):
""" Internal auxiliary function for fft, ifft, rfft, irfft."""
if n is None:
n = x.shape[axis]
elif n != x.shape[axis]:
x, copy_made = _fix_shape(x,n,axis)
overwrite_x = overwrite_x or copy_made
if axis == -1 or axis == len(x.shape)-1:
r = work_function(x,n,direction,overwrite_x=overwrite_x)
else:
x = swapaxes(x, axis, -1)
r = work_function(x,n,direction,overwrite_x=overwrite_x)
r = swapaxes(r, axis, -1)
return r
def fft(x, n=None, axis=-1, overwrite_x=0):
    """
    Return discrete Fourier transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array-like
        array to fourier transform.
    n : int, optional
        Length of the Fourier transform. If n<x.shape[axis],
        x is truncated. If n>x.shape[axis], x is zero-padded.
        (Default n=x.shape[axis]).
    axis : int, optional
        Axis along which the fft's are computed. (default=-1)
    overwrite_x : bool, optional
        If True the contents of x can be destroyed. (default=False)

    Returns
    -------
    z : complex ndarray
        with the elements:
            [y(0),y(1),..,y(n/2-1),y(-n/2),...,y(-1)]        if n is even
            [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)]  if n is odd
        where
            y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1
        Note that y(-j) = y(n-j).conjugate().

    See Also
    --------
    ifft : Inverse FFT
    rfft : FFT of a real sequence

    Notes
    -----
    The packing of the result is "standard": If A = fft(a, n), then A[0]
    contains the zero-frequency term, A[1:n/2+1] contains the
    positive-frequency terms, and A[n/2+1:] contains the negative-frequency
    terms, in order of decreasingly negative frequency. So for an 8-point
    transform, the frequencies of the result are [ 0, 1, 2, 3, 4, -3, -2, -1].

    This is most efficient for n a power of two.

    Examples
    --------
    >>> x = np.arange(5)
    >>> np.all(np.abs(x - fft(ifft(x))) < 1.e-15)  # within numerical accuracy
    True

    """
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # Real input gets converted into a fresh complex output anyway, so the
    # temporary can always be overwritten.
    if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
        overwrite_x = 1
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    if n is None:
        n = tmp.shape[axis]
    elif n != tmp.shape[axis]:
        tmp, copy_made = _fix_shape(tmp,n,axis)
        overwrite_x = overwrite_x or copy_made
    # The work function transforms the last axis only; swap if needed.
    # (positional args: n, direction=1 forward, normalize=0, overwrite flag)
    if axis == -1 or axis == len(tmp.shape) - 1:
        return work_function(tmp,n,1,0,overwrite_x)
    tmp = swapaxes(tmp, axis, -1)
    tmp = work_function(tmp,n,1,0,overwrite_x)
    return swapaxes(tmp, axis, -1)
def ifft(x, n=None, axis=-1, overwrite_x=0):
    """
    Return discrete inverse Fourier transform of real or complex sequence.

    The returned complex array contains ``y(0), y(1),..., y(n-1)`` where

    ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.

    Parameters
    ----------
    x : array_like
        Transformed data to invert.
    n : int, optional
        Length of the inverse Fourier transform.  If ``n < x.shape[axis]``,
        `x` is truncated.  If ``n > x.shape[axis]``, `x` is zero-padded.
        The default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the ifft's are computed; the default is over the
        last axis (i.e., ``axis=-1``).
    overwrite_x : bool, optional
        If True the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    z : complex ndarray
        The inverse transform (with the 1/n normalization implied by the
        ``mean()`` in the formula above).
    """
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # Real input produces a fresh complex output, so the temporary can
    # always be overwritten safely.
    if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
        overwrite_x = 1
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    if n is None:
        n = tmp.shape[axis]
    elif n != tmp.shape[axis]:
        tmp, copy_made = _fix_shape(tmp,n,axis)
        overwrite_x = overwrite_x or copy_made
    # Work function transforms the last axis only; swap if needed.
    # (direction=-1 inverse; the third flag (1) requests normalization.)
    if axis == -1 or axis == len(tmp.shape) - 1:
        return work_function(tmp,n,-1,1,overwrite_x)
    tmp = swapaxes(tmp, axis, -1)
    tmp = work_function(tmp,n,-1,1,overwrite_x)
    return swapaxes(tmp, axis, -1)
def rfft(x, n=None, axis=-1, overwrite_x=0):
    """
    Discrete Fourier transform of a real sequence.

    The returned real arrays contains::

      [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]              if n is even
      [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))]   if n is odd

    where
    ::

      y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)
      j = 0..n-1

    Note that ``y(-j) == y(n-j).conjugate()``.

    Parameters
    ----------
    x : array_like, real-valued
        The data to transform.
    n : int, optional
        Defines the length of the Fourier transform.  If `n` is not specified
        (the default) then ``n = x.shape[axis]``.  If ``n < x.shape[axis]``,
        `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded.
    axis : int, optional
        The axis along which the transform is applied.  The default is the
        last axis.
    overwrite_x : bool, optional
        If set to true, the contents of `x` can be overwritten.  Default is
        False.

    Raises
    ------
    TypeError
        If the input is not a real sequence.

    See also
    --------
    fft, irfft, scipy.fftpack.basic

    Notes
    -----
    Within numerical accuracy, ``y == rfft(irfft(y))``.
    """
    tmp = _asfarray(x)
    if not numpy.isrealobj(tmp):
        raise TypeError("1st argument must be real sequence")
    try:
        work_function = _DTYPE_TO_RFFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    # direction=1: forward transform.
    return _raw_fft(tmp,n,axis,1,overwrite_x,work_function)
def irfft(x, n=None, axis=-1, overwrite_x=0):
    """ irfft(x, n=None, axis=-1, overwrite_x=0) -> y

    Return inverse discrete Fourier transform of real sequence x.

    The contents of x is interpreted as the output of the rfft(..)
    function (the packed half-spectrum format documented there).

    The returned real array contains::

      [y(0),y(1),...,y(n-1)]

    where for n is even::

      y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
                                   * exp(sqrt(-1)*j*k* 2*pi/n)
                  + c.c. + x[0] + (-1)**(j) x[n-1])

    and for n is odd::

      y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
                                   * exp(sqrt(-1)*j*k* 2*pi/n)
                  + c.c. + x[0])

    c.c. denotes complex conjugate of preceding expression.

    Optional input: see rfft.__doc__
    """
    tmp = _asfarray(x)
    if not numpy.isrealobj(tmp):
        raise TypeError("1st argument must be real sequence")
    try:
        work_function = _DTYPE_TO_RFFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    # direction=-1: inverse transform (includes the 1/n normalization).
    return _raw_fft(tmp,n,axis,-1,overwrite_x,work_function)
def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function):
    """ Internal auxiliary function for fftnd, ifftnd.

    Pads/truncates `x` to shape `s` along `axes`, moves the requested axes
    to the end, runs `work_function` there, and restores the axis order.
    """
    if s is None:
        if axes is None:
            s = x.shape
        else:
            s = numpy.take(x.shape, axes)
    s = tuple(s)
    if axes is None:
        noaxes = True
        axes = range(-x.ndim, 0)
    else:
        noaxes = False
    if len(axes) != len(s):
        raise ValueError("when given, axes and shape arguments "\
                         "have to be of the same length")
    # No need to swap axes, array is in C order
    if noaxes:
        for i in axes:
            x, copy_made = _fix_shape(x, s[i], i)
            overwrite_x = overwrite_x or copy_made
        return work_function(x,s,direction,overwrite_x=overwrite_x)
    # We ordered axes, because the code below to push axes at the end of the
    # array assumes axes argument is in ascending order.
    # NOTE: `id` shadows the builtin here; kept for byte-identity.
    id = numpy.argsort(axes)
    axes = [axes[i] for i in id]
    s = [s[i] for i in id]
    # Swap the request axes, last first (i.e. First swap the axis which ends up
    # at -1, then at -2, etc...), such as the request axes on which the
    # operation is carried become the last ones
    for i in range(1, len(axes)+1):
        x = numpy.swapaxes(x, axes[-i], -i)
    # We can now operate on the axes waxes, the p last axes (p = len(axes)), by
    # fixing the shape of the input array to 1 for any axis the fft is not
    # carried upon.
    waxes = range(x.ndim - len(axes), x.ndim)
    # NOTE(review): `shape` is a float array here (numpy.ones default dtype);
    # presumably the C work function casts it — confirm before porting.
    shape = numpy.ones(x.ndim)
    shape[waxes] = s
    for i in range(len(waxes)):
        x, copy_made = _fix_shape(x, s[i], waxes[i])
        overwrite_x = overwrite_x or copy_made
    r = work_function(x, shape, direction, overwrite_x=overwrite_x)
    # reswap in the reverse order (first axis first, etc...) to get original
    # order
    for i in range(len(axes), 0, -1):
        r = numpy.swapaxes(r, -i, axes[-i])
    return r
def fftn(x, shape=None, axes=None, overwrite_x=0):
    """ fftn(x, shape=None, axes=None, overwrite_x=0) -> y

    Return multi-dimensional discrete Fourier transform of arbitrary
    type sequence x.

    The returned array contains::

      y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
         x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)

    where d = len(x.shape) and n = x.shape.
    Note that y[..., -j_i, ...] = y[..., n_i-j_i, ...].conjugate().

    Optional input:
      shape
        Defines the shape of the Fourier transform.  If shape is not
        specified then shape=take(x.shape,axes,axis=0).
        If shape[i]>x.shape[i] then the i-th dimension is padded with
        zeros.  If shape[i]<x.shape[i], then the i-th dimension is
        truncated to desired length shape[i].
      axes
        The transform is applied along the given axes of the input
        array (or the newly constructed array if shape argument was
        used).
      overwrite_x
        If set to true, the contents of x can be destroyed.

    Notes:
      y == fftn(ifftn(y)) within numerical accuracy.
    """
    # direction=1: forward transform.
    return _raw_fftn_dispatch(x, shape, axes, overwrite_x, 1)
def _raw_fftn_dispatch(x, shape, axes, overwrite_x, direction):
    # Shared front-end for fftn/ifftn: coerce to a float/complex array,
    # look up the dtype-specific work function, then run the n-d core.
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFTN[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # Real input yields a fresh complex result, so overwriting is safe.
    is_complex = istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)
    if not is_complex:
        overwrite_x = 1
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    return _raw_fftnd(tmp, shape, axes, direction, overwrite_x, work_function)
def ifftn(x, shape=None, axes=None, overwrite_x=0):
    """
    Return inverse multi-dimensional discrete Fourier transform of
    arbitrary type sequence x.

    The returned array contains::

      y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
         x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)

    where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``.

    For description of parameters see `fftn`.

    See Also
    --------
    fftn : for detailed information.
    """
    # direction=-1: inverse transform (with the 1/p normalization above).
    return _raw_fftn_dispatch(x, shape, axes, overwrite_x, -1)
def fft2(x, shape=None, axes=(-2,-1), overwrite_x=0):
    """
    2-D discrete Fourier transform.

    Return the two-dimensional discrete Fourier transform of the 2-D argument
    `x`.

    See Also
    --------
    fftn : for detailed information.
    """
    # Thin wrapper: an n-d transform restricted to the last two axes.
    return fftn(x, shape=shape, axes=axes, overwrite_x=overwrite_x)
def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=0):
    """
    2-D discrete inverse Fourier transform of real or complex sequence.

    Return inverse two-dimensional discrete Fourier transform of
    arbitrary type sequence x.

    See `ifft` for more information.

    See also
    --------
    fft2, ifft
    """
    # Thin wrapper: an n-d inverse transform restricted to the last two axes.
    return ifftn(x, shape=shape, axes=axes, overwrite_x=overwrite_x)
|
zlfben/gem5 | refs/heads/develop | src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_subtraction.py | 91 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# x86 SIMD horizontal-subtraction microcode.  HSUBPS/HSUBPD are not yet
# implemented: the assembly text below contains only placeholder comments,
# so assembling it defines no microops.
microcode = '''
# HSUBPS
# HSUBPD
'''
|
n3ocort3x/endeavoru_3.14 | refs/heads/master | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
# Optional filter: when set, only syscalls issued by this command name
# are counted.
for_comm = None
default_interval = 3
interval = default_interval
# Command line: [comm interval] | [interval] | [comm] | (nothing).
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        # A non-numeric single argument is a command name, not an interval.
        for_comm = sys.argv[1]
        interval = default_interval
# syscall id -> hit count (autodict comes from Perf-Trace-Util).
syscalls = autodict()
def trace_begin():
    # Called once by perf when tracing starts: spawn the background thread
    # that periodically prints and resets the syscall totals (Python 2
    # `thread` module).
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    """Per-event hook from perf: bump the counter for this syscall id."""
    if for_comm is not None and common_comm != for_comm:
        return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit: the autodict slot is not yet a number.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Background refresher loop: every `interval` seconds clear the screen,
    # print the syscalls seen so far sorted by descending count, then reset
    # the counters.  Python 2 only (print statements, dict.iteritems and the
    # tuple-unpacking lambda are all invalid Python 3).
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s  %10s\n" % ("event", "count"),
        print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                # syscall_name() may fail for unknown ids; skip those rows.
                print "%-40s  %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
|
coreos/chromite | refs/heads/master | lib/chrome_util.py | 2 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Library containing utility functions used for Chrome-specific build tasks.
"""
import functools
import glob
import logging
import os
import shlex
import shutil
from chromite.buildbot import cbuildbot_results as results_lib
from chromite.lib import cros_build_lib
from chromite.lib import osutils
# Taken from external/gyp.git/pylib.
def _NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
def ProcessGypDefines(defines):
  """Validate and convert a string containing GYP_DEFINES to dictionary."""
  assert defines is not None
  tokens = shlex.split(defines)
  return _NameValueListToDict(tokens)
def DictToGypDefines(def_dict):
  """Convert a dict to GYP_DEFINES format.

  Each key/value pair becomes "KEY='VALUE'", joined by single spaces.
  """
  # Use .items() (valid on both Python 2 and 3) instead of the
  # Python-2-only .iteritems().
  return ' '.join("%s='%s'" % (k, v) for k, v in def_dict.items())
class Conditions(object):
  """Functions that return conditions used to construct Path objects.

  Condition functions returned by the public methods have signature
  f(gyp_defines, staging_flags).  For description of gyp_defines and
  staging_flags see docstring for StageChromeFromBuildDir().
  """

  @classmethod
  def _GypSet(cls, flag, value, gyp_defines, _staging_flags):
    # With no expected value, any truthy setting counts; otherwise the
    # setting must match exactly.
    current = gyp_defines.get(flag)
    if value is None:
      return bool(current)
    return current == value

  @classmethod
  def _GypNotSet(cls, flag, gyp_defines, staging_flags):
    return not cls._GypSet(flag, None, gyp_defines, staging_flags)

  @classmethod
  def _StagingFlagSet(cls, flag, _gyp_defines, staging_flags):
    return flag in staging_flags

  @classmethod
  def GypSet(cls, flag, value=None):
    """Returns condition that tests a gyp flag is set (possibly to a value)."""
    return functools.partial(cls._GypSet, flag, value)

  @classmethod
  def GypNotSet(cls, flag):
    """Returns condition that tests a gyp flag is not set."""
    return functools.partial(cls._GypNotSet, flag)

  @classmethod
  def StagingFlagSet(cls, flag):
    """Returns condition that tests a staging_flag is set."""
    return functools.partial(cls._StagingFlagSet, flag)
# Staging-failure exceptions; all derive from the build-step failure base
# so the build-results machinery reports them uniformly.
class MultipleMatchError(results_lib.StepFailure):
  """A glob pattern matches multiple files but a non-dir dest was specified."""
class MissingPathError(results_lib.StepFailure):
  """An expected path is non-existent."""
class MustNotBeDirError(results_lib.StepFailure):
  """The specified path should not be a directory, but is."""
class Copier(object):
  """Single file/directory copier.

  Provides destination stripping and permission setting functionality.
  """
  def __init__(self, strip_bin=None, exe_opts=None):
    """Initialization.

    Arguments:
      strip_bin: Path to the program used to strip binaries.  If set to None,
        binaries will not be stripped.
      exe_opts: Permissions (chmod mode) to set on executables, or None to
        keep the copied permissions.
    """
    self.strip_bin = strip_bin
    self.exe_opts = exe_opts
  def Copy(self, src, dest, exe):
    """Perform the copy.

    Arguments:
      src: The path of the file/directory to copy.
      dest: The exact path of the destination.  Should not already exist.
      exe: If |src| is a file, whether the file is an executable.  If |src| is
        a directory, whether to treat the contents of the directory as
        executables.
    """
    def Log(directory):
      sep = ' [d] -> ' if directory else ' -> '
      logging.debug('%s %s %s', src, sep, dest)
    osutils.SafeMakedirs(os.path.dirname(dest))
    src_is_dir = os.path.isdir(src)
    Log(src_is_dir)
    if src_is_dir:
      # copytree() does not know about copying to a containing directory.
      if os.path.isdir(dest):
        dest = os.path.join(dest, os.path.basename(src))
      shutil.copytree(src, dest)
    elif exe and os.path.getsize(src) > 0:
      if self.strip_bin:
        # Strip while copying: the strip tool writes the output file.
        cros_build_lib.DebugRunCommand([self.strip_bin, '--strip-unneeded',
                                        '-o', dest, src])
      else:
        # Bug fix: without a strip binary the file was never actually
        # copied, making the copystat() below fail on a missing dest.
        shutil.copy2(src, dest)
      # Restore source timestamps/permissions, then force exe_opts on top.
      shutil.copystat(src, dest)
      if self.exe_opts is not None:
        os.chmod(dest, self.exe_opts)
    else:
      # Plain file, or a zero-length placeholder "executable": plain copy.
      shutil.copy2(src, dest)
class Path(object):
  """Represents an artifact to be copied from build dir to staging dir."""
  def __init__(self, src, exe=False, cond=None, dest=None, optional=False):
    """Initializes the object.

    Arguments:
      src: The relative path of the artifact.  Can be a file or a directory.
        Can be a glob pattern.
      exe: Identifies the path as either being an executable or containing
        executables.  Executables may be stripped during copy, and have
        special permissions set.  We currently only support stripping of
        specified files and glob patterns that return files.  If |src| is a
        directory or contains directories, the content of the directory will
        not be stripped.
      cond: A condition (see Conditions class) to test for in deciding whether
        to process this artifact.  If supplied, the artifact will be treated
        as optional unless --strict is supplied.
      dest: Name to give to the target file/directory.  Defaults to keeping the
        same name as the source.
      optional: Whether to enforce the existence of the artifact.  If unset,
        the script errors out if the artifact does not exist.
    """
    self.src = src
    self.exe = exe
    self.cond = cond
    # A conditional artifact is implicitly optional: its absence is only an
    # error under --strict.
    self.optional = optional or cond
    self.dest = dest
  def ShouldProcess(self, gyp_defines, staging_flags):
    """Tests whether this artifact should be copied."""
    if self.cond:
      return self.cond(gyp_defines, staging_flags)
    return True
  def Copy(self, src_base, dest_base, copier, strict, sloppy):
    """Copy artifact(s) from source directory to destination.

    Arguments:
      src_base: The directory to apply the src glob pattern match in.
      dest_base: The directory to copy matched files to.  |Path.dest|.
      copier: A Copier instance that performs the actual file/directory
        copying.
      strict: If set, enforce that all optional files are copied.
      sloppy: If set, ignore when mandatory artifacts are missing.

    Returns:
      A list of the artifacts copied.

    Raises:
      MissingPathError: a required artifact (or, under strict, any artifact)
        does not exist.
      MultipleMatchError: the glob matched several files but dest is not a
        directory.
      MustNotBeDirError: a non-slash-terminated src resolved to a directory.
    """
    assert not (strict and sloppy), 'strict and sloppy are not compatible.'
    src = os.path.join(src_base, self.src)
    paths = glob.glob(src)
    if not paths:
      # Nothing matched: decide between hard error, silent skip (optional),
      # and warn-but-continue (sloppy).
      if strict or (not self.optional and not sloppy):
        msg = ('%s does not exist and is required.\n'
               'You can bypass this error with --sloppy.\n'
               'Aborting copy...' % src)
        raise MissingPathError(msg)
      elif self.optional:
        logging.debug('%s does not exist and is optional.  Skipping.', src)
      else:
        logging.warn('%s does not exist and is required.  Skipping anyway.',
                     src)
    elif len(paths) > 1 and self.dest and not self.dest.endswith('/'):
      # Several matches can only land in a directory destination.
      raise MultipleMatchError(
          'Glob pattern %r has multiple matches, but dest %s '
          'is not a directory.\n'
          'Aborting copy...' % (self.src, self.dest))
    elif not src.endswith('/') and os.path.isdir(src):
      # Directories must be named with a trailing slash in the table.
      raise MustNotBeDirError('%s must not be a directory\n'
                              'Aborting copy...' % (src,))
    else:
      for p in paths:
        # self.dest of None means "mirror the source-relative path".
        dest = os.path.join(
            dest_base,
            os.path.relpath(p, src_base) if self.dest is None else self.dest)
        copier.Copy(p, dest, self.exe)
    return paths
# GYP define names consulted by the copy conditions below.
_DISABLE_NACL = 'disable_nacl'
_USE_DRM = 'use_drm'
_USE_PDF = 'use_pdf'
# Command-line staging flag names.
_HIGHDPI_FLAG = 'highdpi'
_CONTENT_SHELL_FLAG = 'content_shell'
_WIDEVINE_FLAG = 'widevine'
STAGING_FLAGS = (_HIGHDPI_FLAG, _CONTENT_SHELL_FLAG, _WIDEVINE_FLAG)
# Name the sandbox binary is staged under (also used by _FixPermissions for
# the setuid bit).
_CHROME_SANDBOX_DEST = 'chrome-sandbox'
# Shorthand alias for the Conditions helper class.
C = Conditions
# Master list of build artifacts that may be staged; each Path's cond/exe
# attributes drive whether and how it is copied.
_COPY_PATHS = (
  Path('ash_shell',
       cond=C.GypSet(_USE_DRM)),
  Path('aura_demo',
       cond=C.GypSet(_USE_DRM)),
  Path('chrome',
       exe=True),
  Path('chrome_sandbox',
       dest=_CHROME_SANDBOX_DEST),
  Path('chrome-wrapper'),
  Path('chrome.pak'),
  Path('chrome_100_percent.pak'),
  Path('chrome_200_percent.pak',
       cond=C.StagingFlagSet(_HIGHDPI_FLAG)),
  Path('content_shell',
       exe=True,
       cond=C.StagingFlagSet(_CONTENT_SHELL_FLAG)),
  Path('content_shell.pak',
       cond=C.StagingFlagSet(_CONTENT_SHELL_FLAG)),
  Path('extensions/',
       optional=True),
  Path('lib/*.so',
       exe=True,
       cond=C.GypSet('component', value='shared_library')),
  Path('libffmpegsumo.so',
       exe=True),
  Path('libpdf.so',
       exe=True,
       cond=C.GypSet(_USE_PDF)),
  Path('libppGoogleNaClPluginChrome.so',
       exe=True,
       cond=C.GypNotSet(_DISABLE_NACL)),
  Path('libosmesa.so',
       exe=True, optional=True),
  Path('libwidevinecdmadapter.so',
       exe=True,
       cond=C.StagingFlagSet(_WIDEVINE_FLAG)),
  Path('libwidevinecdm.so',
       exe=True,
       cond=C.StagingFlagSet(_WIDEVINE_FLAG)),
  Path('locales/'),
  Path('nacl_helper_bootstrap',
       cond=C.GypNotSet(_DISABLE_NACL)),
  Path('nacl_irt_*.nexe',
       exe=True,
       cond=C.GypNotSet(_DISABLE_NACL)),
  Path('nacl_helper',
       exe=True, optional=True,
       cond=C.GypNotSet(_DISABLE_NACL)),
  Path('resources/'),
  Path('resources.pak'),
  Path('xdg-settings'),
  Path('*.png'),
)
def _FixPermissions(dest_base):
  """Last minute permission fixes."""
  # Set the suid bit for the chrome sandbox.
  # TODO(rcui): Implement this through a permission mask attribute in the Path
  # class.
  # Make everything world-readable.
  cros_build_lib.DebugRunCommand(['chmod', '-R', 'a+r', dest_base])
  # Any file already executable by owner or group also becomes executable by
  # everyone ('/110' matches either exec bit).
  cros_build_lib.DebugRunCommand(
      ['find', dest_base, '-perm', '/110', '-exec', 'chmod', 'a+x', '{}', '+'])
  target = os.path.join(dest_base, _CHROME_SANDBOX_DEST)
  if os.path.exists(target):
    # 4755: setuid root + rwxr-xr-x, required by the chrome sandbox.
    cros_build_lib.DebugRunCommand(['chmod', '4755', target])
class StagingError(results_lib.StepFailure):
  """An error occurred during StageChromeFromBuildDir."""
def StageChromeFromBuildDir(staging_dir, build_dir, strip_bin, strict=False,
                            sloppy=False, gyp_defines=None, staging_flags=None):
  """Populates a staging directory with necessary build artifacts.
  If |strict| is set, then we decide what to stage based on the |gyp_defines|
  and |staging_flags| passed in.  Otherwise, we stage everything that we know
  about, that we can find.
  Arguments:
    staging_dir: Path to an empty staging directory.
    build_dir: Path to location of Chrome build artifacts.
    strip_bin: Path to executable used for stripping binaries.
    strict: If set, decide what to stage based on the |gyp_defines| and
            |staging_flags| passed in, and enforce that all optional files
            are copied.  Otherwise, we stage optional files if they are
            there, but we don't complain if they're not.
    sloppy: Ignore when mandatory artifacts are missing.
    gyp_defines: A dictionary (i.e., one returned by ProcessGypDefines)
                 containing GYP_DEFINES Chrome was built with.
    staging_flags: A list of extra staging flags.  Valid flags are specified
                   in STAGING_FLAGS.
  Raises:
    StagingError: |staging_dir| exists and is non-empty.
    MissingPathError: nothing at all could be copied.
  """
  if os.path.exists(staging_dir) and os.listdir(staging_dir):
    raise StagingError('Staging directory %s must be empty.' % staging_dir)
  osutils.SafeMakedirs(os.path.join(staging_dir, 'plugins'))
  cros_build_lib.DebugRunCommand(['chmod', '-R', '0755', staging_dir])
  if gyp_defines is None:
    gyp_defines = {}
  if staging_flags is None:
    staging_flags = []
  # 0755 (Python 2 octal literal) == rwxr-xr-x, applied to staged executables.
  copier = Copier(strip_bin=strip_bin, exe_opts=0755)
  copied_paths = []
  for p in _COPY_PATHS:
    # In non-strict mode every known artifact is attempted; ShouldProcess
    # gates the copy only when --strict was requested.
    if not strict or p.ShouldProcess(gyp_defines, staging_flags):
      copied_paths += p.Copy(build_dir, staging_dir, copier, strict, sloppy)
  if not copied_paths:
    raise MissingPathError('Couldn\'t find anything to copy!\n'
                           'Are you looking in the right directory?\n'
                           'Aborting copy...')
  _FixPermissions(staging_dir)
|
ktbyers/pynet-ons-mar17 | refs/heads/master | jinja2_example/jinja2_test.py | 2 | import jinja2
# Minimal Jinja2 demo: render a template whose {{ a }} placeholder is filled
# from a context dictionary.
my_dict = {'a': 'whatever'}
my_template = '''
some
text
of
something
{{ a }}
something
'''
t = jinja2.Template(my_template)
# Use print() call syntax so the script runs unchanged under both Python 2
# and Python 3 (for a single argument the output is identical); the original
# `print t.render(...)` statement is a SyntaxError on Python 3.
print(t.render(my_dict))
|
ccomb/OpenUpgrade | refs/heads/master | addons/website_blog/tests/test_ui.py | 34 | import openerp.tests
class TestUi(openerp.tests.HttpCase):
    # Browser-level smoke test for the website blog module.
    def test_admin(self):
        # Drive the 'blog' JS tour from the homepage via PhantomJS; the third
        # argument is the JS expression phantom_js polls as a ready condition
        # (the tour must be registered before it can run).
        self.phantom_js("/", "openerp.website.Tour.run('blog', 'test')", "openerp.website.Tour.tours.blog")
|
eeue56/PyGeo2 | refs/heads/master | pygeo/tests/complex/test_AniPoints.py | 1 | from pygeo import *
# Interactive PyGeo demo: animated points on the complex plane and on the unit
# (Riemann) sphere, with projections between the two.  The trailing comments
# name the underlying implementation classes.
v=display(scale=5,camera_vector=[0,.1,-.1],delay=.5,trace_on=1)
# CLASS being called
""" pickable points constrained to complex
plane at initial given x,y coords """
z1=zFreePoint(1,-3,color=BLACK) #Z_FreePostion
z2=zFreePoint(-1,-1,color=BLACK) #Z_FreePostion
z3=zFreePoint(1,-1,color=BLACK) #Z_FreePostion
""" circles on complex plane through and determined by
three point arguments """
zcircle=zCircle(z1,z2,z3,level=2) #zCircumCircle
#--------------------------------------------------
""" point of complex plane moving on the circumference
of the circle argument at each update cycle"""
zAniPoint(zcircle,rate=36,trace=True,color=RED) #zCirclingPoint
#--------------------------------------------------
""" line on complex plane through point arguments"""
zline=zLine(z1,z2,level=1) #zLineFromPoints
#--------------------------------------------------
""" point of complex plane moving on the line
argument at each update cycle"""
zAniPoint(zline,rate=36,trace=True) #zSlidingPoint
#--------------------------------------------------
""" circle on unit sphere determined by
projection of circle on complex plane """
ucircle=uCircle(zcircle,level=1) #z_to_uCircle
#--------------------------------------------------
""" point of unit sphere moving on the cirfcumference
of the circle argument at each update cycle"""
uc=uAniPoint(ucircle,level=1) #uCirclingPoint
#--------------------------------------------------
""" projection of given point on unit sphere to point
to complex plane """
zPoint(uc,trace=True) #u_to_zPoint
#--------------------------------------------------
""" point moving on surface of unit sphere """
us=uAniPoint(trace=True,color=BLUE) #uSpiral
#--------------------------------------------------
""" projection of given point on unit sphere to point
to complex plane """
zPoint(us,trace=True) #u_to_zPoint
#--------------------------------------------------
""" renderingof the unit sphere """
uSphere() #uSphere
#--------------------------------------------------
# Alternative interactive mode; animate() runs the update loop instead.
#v.pickloop()
v.animate() |
sobercoder/gem5 | refs/heads/master | ext/ply/test/yacc_notfunc.py | 174 | # -----------------------------------------------------------------------------
# yacc_notfunc.py
#
# p_rule not defined as a function
# -----------------------------------------------------------------------------
# NOTE: this file is a NEGATIVE test fixture for ply.yacc: the defect below
# (p_statement_assign bound to a string instead of a function) is intentional
# and exercises yacc's error reporting.  Do not "fix" it.
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
    )
# dictionary of names
names = { }
# Deliberately a string, not a function -- the condition under test.
p_statement_assign = "Blah"
# The docstrings of the p_* functions below are ply grammar specifications,
# not documentation; they must not be edited.
def p_statement_expr(t):
    'statement : expression'
    print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    if t[2] == '+'  : t[0] = t[1] + t[3]
    elif t[2] == '-': t[0] = t[1] - t[3]
    elif t[2] == '*': t[0] = t[1] * t[3]
    elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
    'expression : MINUS expression %prec UMINUS'
    t[0] = -t[2]
def p_expression_group(t):
    'expression : LPAREN expression RPAREN'
    t[0] = t[2]
def p_expression_number(t):
    'expression : NUMBER'
    t[0] = t[1]
def p_expression_name(t):
    'expression : NAME'
    try:
        t[0] = names[t[1]]
    except LookupError:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_error(t):
    print("Syntax error at '%s'" % t.value)
# Building the parser is expected to surface the bad p_statement_assign rule.
yacc.yacc()
|
ActiveState/code | refs/heads/master | recipes/Python/578675_PyQt4_pressed_modifier_keys_names_method/recipe-578675.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QApplication
import functools
# Human-readable names for the Qt keyboard modifier flags tested by
# check_modifiers below.
modies = { 'shift': Qt.ShiftModifier,
           'control': Qt.ControlModifier,
           'alt': Qt.AltModifier,
           'meta': Qt.MetaModifier }
def check_modifiers(org_meth):
    """Add modifiers kwarg to a method that contains a tuple of currently pressed modifiers.

    The injected ``modifiers`` keyword argument is a tuple of names from the
    module-level ``modies`` mapping whose Qt modifier flag is fully set in
    ``QApplication.keyboardModifiers()`` at call time.
    """
    @functools.wraps(org_meth)
    def wrapper(*args, **kwargs):
        curr = QApplication.keyboardModifiers()
        # `curr & which == which` checks that ALL bits of the flag are set.
        kwargs['modifiers'] = tuple( name for name, which in modies.items() if curr & which == which )
        # Propagate the wrapped callable's return value; the original wrapper
        # silently discarded it, which breaks any non-slot use of the
        # decorator (callers always received None).
        return org_meth(*args, **kwargs)
    return wrapper
if __name__ == '__main__':
    # NOTE(review): `sip` appears unused here -- presumably imported for its
    # PyQt4 side effects; confirm before removing.
    import sip
    from PyQt4 import QtGui, QtCore
    class MainWindow(QtGui.QMainWindow):
        """Demo window with a single button that reports pressed modifiers."""
        def __init__(self):
            super(MainWindow, self).__init__()
            centralWidget = QtGui.QWidget(self)
            layout = QtGui.QHBoxLayout(centralWidget)
            self.setCentralWidget(centralWidget)
            self.clickButton = QtGui.QPushButton("click", centralWidget)
            self.clickButton.clicked.connect(self.klick)
            layout.addWidget(self.clickButton)
            self.statusBar()
            self.setFixedWidth(600)
        @check_modifiers
        def klick(self, event, modifiers):
            # `modifiers` is injected by the check_modifiers decorator; show
            # both the raw bitmask and the decoded names in the status bar.
            ms = QtGui.QApplication.keyboardModifiers()
            m = "keyboardModifiers: {1:0=32b} {0} has been pressed"
            self.statusBar().showMessage(m.format(repr(modifiers), int(ms)))
    import sys
    app = QtGui.QApplication(sys.argv)
    mainWin = MainWindow()
    mainWin.show()
    sys.exit(app.exec_())
|
cloudera/ibis | refs/heads/master | ibis/config.py | 1 | """Ibis configuration module."""
# This file has been adapted from pandas/core/config.py. pandas 3-clause BSD
# license. See LICENSES/pandas
#
# Further modifications:
#
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import re
import warnings
from collections import namedtuple
from contextlib import contextmanager
from typing import Callable
# Metadata record for a deprecated option: optional custom message,
# replacement key, and planned removal version.
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
# Metadata record for a registered option: default value, description,
# validator callable, and post-set callback.
RegisteredOption = namedtuple(
    'RegisteredOption', 'key defval doc validator cb'
)
_deprecated_options = {}  # holds deprecated option metadata
_registered_options = {}  # holds registered option metadata
_global_config = {}  # holds the current values for registered options
_reserved_keys = ['all']  # keys which have a special meaning
class OptionError(AttributeError, KeyError):
    """Exception for ibis.options.
    Backwards compatible with KeyError checks.
    """
    pass
# User API
def _get_single_key(pat, silent):
    """Resolve *pat* to exactly one registered option key.

    Raises OptionError when the pattern matches zero or multiple keys.
    Unless *silent*, a deprecation warning is emitted for deprecated keys,
    and the key is translated to its replacement when one is defined.
    """
    matches = _select_options(pat)
    if not matches:
        if not silent:
            _warn_if_deprecated(pat)
        raise OptionError('No such keys(s): %r' % pat)
    if len(matches) > 1:
        raise OptionError('Pattern matched multiple keys')
    (key,) = matches
    if not silent:
        _warn_if_deprecated(key)
    return _translate_key(key)
def _get_option(pat, silent=False):
    """Return the current value of the single option matching *pat*."""
    full_key = _get_single_key(pat, silent)
    # Walk the nested config dict down to the leaf holding the value.
    parent, leaf = _get_root(full_key)
    return parent[leaf]
def _set_option(*args, **kwargs):
    """Set one or more options from positional (pattern, value) pairs.

    Accepts an even number of positional arguments interpreted as key/value
    pairs, plus an optional ``silent`` keyword argument suppressing
    deprecation warnings.  Each value is validated (when the option declares
    a validator) before being written, and the option's callback, if any, is
    invoked afterwards.
    """
    # must at least 1 arg deal with constraints later
    nargs = len(args)
    if not nargs or nargs % 2 != 0:
        raise ValueError(
            "Must provide an even number of non-keyword " "arguments"
        )
    # default to false
    silent = kwargs.get('silent', False)
    for k, v in zip(args[::2], args[1::2]):
        key = _get_single_key(k, silent)
        o = _get_registered_option(key)
        if o and o.validator:
            o.validator(v)
        # walk the nested dict
        root, k = _get_root(key)
        root[k] = v
        # BUG FIX: guard against ``o`` being None (possible when a deprecated
        # key is rerouted to a key that was never registered); the previous
        # bare ``if o.cb:`` raised AttributeError in that case, inconsistently
        # with the ``if o and o.validator`` guard above.
        if o and o.cb:
            o.cb(key)
def _describe_option(pat='', _print_desc=True):
    """Print (or return, when *_print_desc* is False) the descriptions of
    every registered option matching *pat*."""
    matched = _select_options(pat)
    if not matched:
        raise OptionError('No such keys(s)')
    # Concatenate the per-option descriptions in match order.
    description = ''.join(_build_option_description(k) for k in matched)
    if _print_desc:
        print(description)
    else:
        return description
def _reset_option(pat, silent=False):
    """Reset every option matching *pat* to its registered default."""
    matched = _select_options(pat)
    if not matched:
        raise OptionError('No such keys(s)')
    # Refuse overly-short patterns that fan out to several keys, so a typo
    # cannot silently reset half the configuration ('all' is the escape hatch).
    if len(matched) > 1 and len(pat) < 4 and pat != 'all':
        raise ValueError(
            'You must specify at least 4 characters when '
            'resetting multiple keys, use the special keyword '
            '"all" to reset all the options to their default '
            'value'
        )
    for key in matched:
        _set_option(key, _registered_options[key].defval, silent=silent)
def get_default_val(pat):
    """Return the registered default value for the option matching *pat*.

    Parameters
    ----------
    pat : string

    Returns
    -------
    The option's default value.
    """
    resolved = _get_single_key(pat, silent=True)
    return _get_registered_option(resolved).defval
class DictWrapper:
    """Provide attribute-style access to a nested dict."""
    def __init__(self, d, prefix=""):
        # Use object.__setattr__ to bypass this class's own __setattr__
        # guard, which would otherwise reject these assignments.
        object.__setattr__(self, "d", d)
        object.__setattr__(self, "prefix", prefix)
    def __repr__(self):
        """Return the dictionary as formatted string."""
        return pprint.pformat(self.d)
    def __setattr__(self, key, val):
        """Set given value for the given attribute name (key).
        Parameters
        ----------
        key : string
        val : object
        """
        # Build the fully-qualified option key from the accumulated prefix.
        prefix = self.prefix
        if prefix:
            prefix += "."
        prefix += key
        # you can't set new keys and you can't overwrite subtrees
        if key in self.d and not isinstance(self.d[key], dict):
            _set_option(prefix, val)
        else:
            raise OptionError("You can only set the value of existing options")
    def __getattr__(self, key):
        """Get value for the given attribute name.
        Parameters
        ----------
        key : str
        Returns
        -------
        object
            A nested DictWrapper for sub-trees, otherwise the option value.
        """
        prefix = self.prefix
        if prefix:
            prefix += "."
        prefix += key
        try:
            v = self.d[key]
        except KeyError as e:
            # Surface missing keys as AttributeError, matching attribute
            # access semantics (e.g. for hasattr/getattr).
            raise AttributeError(*e.args)
        if isinstance(v, dict):
            return DictWrapper(v, prefix)
        else:
            return _get_option(prefix)
    def __dir__(self):
        """Return all dictionary keys sorted."""
        return sorted(self.d.keys())
class CallableDynamicDoc:
    """Convert __doc__ into a property function.
    For user convenience, we'd like to have the available options described
    in the docstring. For dev convenience we'd like to generate the docstrings
    dynamically instead of maintaining them by hand. To this, we use this
    class which wraps functions inside a callable, and converts
    __doc__ into a property function. The docstrings below are templates
    using the py2.6+ advanced formatting syntax to plug in a concise list
    of options, and option descriptions.
    """
    def __init__(self, func, doc_tmpl):
        # Template with {opts_desc} / {opts_list} placeholders filled lazily.
        self.__doc_tmpl__ = doc_tmpl
        self.__func__ = func
    def __call__(self, *args, **kwds):
        """Call the function defined when the object was initialized."""
        return self.__func__(*args, **kwds)
    @property
    def __doc__(self) -> str:
        """Create automatically a documentation using a template.
        Returns
        -------
        string
        """
        # Rebuilt on every access so newly registered options are reflected.
        opts_desc = _describe_option('all', _print_desc=False)
        opts_list = pp_options_list(list(_registered_options.keys()))
        return self.__doc_tmpl__.format(
            opts_desc=opts_desc, opts_list=opts_list
        )
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with not arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in the public config API
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
# Attribute-style view over the live option values (e.g. options.a.b).
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context:
    """
    Context manager to temporarily set options in the `with` statement context.
    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
    Examples
    --------
    >>> with option_context('interactive', True):
    ...     print(options.interactive)
    True
    >>> options.interactive
    False
    """
    def __init__(self, *args):
        # Arguments must come in (pattern, value) pairs.
        if not (len(args) % 2 == 0 and len(args) >= 2):
            raise ValueError(
                'Need to invoke as'
                'option_context(pat, val, [(pat, val), ...)).'
            )
        self.ops = list(zip(args[::2], args[1::2]))
    def __enter__(self):
        """Create a backup of current options and define new ones."""
        # Snapshot current values first so __exit__ can restore them.
        undo = []
        for pat, val in self.ops:
            undo.append((pat, _get_option(pat, silent=True)))
        self.undo = undo
        for pat, val in self.ops:
            _set_option(pat, val, silent=True)
    def __exit__(self, *args):
        """Rollback the options values defined before `with` statement."""
        if self.undo:
            for pat, val in self.undo:
                _set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
    """Register an option in the package-wide ibis config object.
    Parameters
    ----------
    key
        a fully-qualified key, e.g. "x.y.option - z".
    defval
        the default value of the option
    doc
        a string description of the option
    validator
        a function of a single argument, should raise `ValueError` if
        called with a value which is not a legal value for the option.
    cb
        a function of a single argument "key", which is called
        immediately after an option value is set/reset. key is
        the full name of the option.
    Raises
    ------
    ValueError if `validator` is specified and `defval` is not a valid value.
    OptionError if `key` is already registered or reserved.
    """
    import keyword
    import tokenize
    key = key.lower()
    if key in _registered_options:
        raise OptionError("Option '%s' has already been registered" % key)
    if key in _reserved_keys:
        raise OptionError("Option '%s' is a reserved key" % key)
    # the default value should be legal
    if validator:
        validator(defval)
    # walk the nested dict, creating dicts as needed along the path
    path = key.split('.')
    for k in path:
        # Each dotted component must be a valid identifier (tokenize.Name is
        # the identifier regex) so attribute access via DictWrapper works.
        if not bool(re.match('^' + tokenize.Name + '$', k)):
            raise ValueError("%s is not a valid identifier" % k)
        if keyword.iskeyword(k):
            raise ValueError("%s is a python keyword" % k)
    cursor = _global_config
    for i, p in enumerate(path[:-1]):
        # A non-dict node means some prefix of the path is itself a leaf
        # option, which would be shadowed -- reject.
        if not isinstance(cursor, dict):
            raise OptionError(
                "Path prefix to option '%s' is already an option"
                % '.'.join(path[:i])
            )
        if p not in cursor:
            cursor[p] = {}
        cursor = cursor[p]
    if not isinstance(cursor, dict):
        raise OptionError(
            "Path prefix to option '%s' is already an option"
            % '.'.join(path[:-1])
        )
    cursor[path[-1]] = defval  # initialize
    # save the option metadata
    _registered_options[key] = RegisteredOption(
        key=key, defval=defval, doc=doc, validator=validator, cb=cb
    )
def deprecate_option(
    key: str, msg: str = None, rkey: str = None, removal_ver: str = None
):
    """Mark option *key* as deprecated.

    Subsequent access to the key emits a warning (``msg`` if given, otherwise
    a generated default mentioning ``rkey`` and ``removal_ver``).  When
    ``rkey`` is supplied, all get/set/reset access is rerouted to it.
    Neither ``key`` nor ``rkey`` is checked for existence.

    Parameters
    ----------
    key : string
        fully-qualified name of the option to deprecate.
    msg : string, optional
        custom warning message.
    rkey : string, optional
        fully-qualified replacement option to reroute access to.
    removal_ver : string, optional
        version in which the option will be removed (used in the default
        message).

    Raises
    ------
    OptionError - if key has already been deprecated.
    """
    key = key.lower()
    if key in _deprecated_options:
        raise OptionError(
            "Option '%s' has already been defined as deprecated." % key
        )
    _deprecated_options[key] = DeprecatedOption(
        key=key, msg=msg, rkey=rkey, removal_ver=removal_ver
    )
# functions internal to the module
def _select_options(pat: str) -> list:
    """Return the registered option keys matching *pat*.

    An exact registered key matches only itself; the reserved pattern
    ``'all'`` matches every key; anything else is treated as a
    case-insensitive regular expression searched against each key.
    """
    # Fast path: the pattern is itself a registered key.
    if pat in _registered_options:
        return [pat]
    all_keys = sorted(_registered_options)
    if pat == 'all':  # reserved key
        return all_keys
    matcher = re.compile(pat, re.I)
    return [key for key in all_keys if matcher.search(key)]
def _get_root(key: str) -> tuple:
    """Walk ``_global_config`` down a dotted *key*.

    Returns the ``(containing_dict, leaf_name)`` pair so callers can read or
    write the option value in place.
    """
    *parents, leaf = key.split('.')
    node = _global_config
    for part in parents:
        node = node[part]
    return node, leaf
def _is_deprecated(key: str) -> bool:
    """Return True when *key* (compared case-insensitively) is deprecated."""
    return key.lower() in _deprecated_options
def _get_deprecated_option(key: str):
    """Return the DeprecatedOption metadata for *key*, or None when the key
    has not been deprecated."""
    # dict.get gives the same None-on-missing behavior as the original
    # try/except KeyError, in one expression.
    return _deprecated_options.get(key)
def _get_registered_option(key: str):
    """
    Retrieve the option metadata if `key` is a registered option.
    Parameters
    ----------
    key : string
    Returns
    -------
    RegisteredOption (namedtuple) if key is registered, None otherwise
    """
    return _registered_options.get(key)
def _translate_key(key: str):
    """Return the replacement key for a deprecated *key*, else *key* itself.

    A deprecated key without a registered replacement (``rkey`` is None)
    also maps to itself.
    """
    dep = _get_deprecated_option(key)
    if dep is None:
        return key
    return dep.rkey or key
def _warn_if_deprecated(key):
    """Emit a DeprecationWarning when *key* is deprecated.

    Returns
    -------
    bool
        True if *key* is deprecated, False otherwise.
    """
    dep = _get_deprecated_option(key)
    if not dep:
        return False
    if dep.msg:
        # A custom message is both printed and warned, as in the original.
        print(dep.msg)
        warnings.warn(dep.msg, DeprecationWarning)
        return True
    # Build the default message from the available metadata.
    msg = "'%s' is deprecated" % key
    if dep.removal_ver:
        msg += ' and will be removed in %s' % dep.removal_ver
    if dep.rkey:
        msg += ", please use '%s' instead." % dep.rkey
    else:
        msg += ', please refrain from using it.'
    warnings.warn(msg, DeprecationWarning)
    return True
def _build_option_description(k: str) -> str:
    """Build a formatted description of a registered option.

    Parameters
    ----------
    k : string
        fully-qualified option key; may be registered, deprecated, or both.

    Returns
    -------
    str
    """
    o = _get_registered_option(k)
    d = _get_deprecated_option(k)
    buf = ['{} '.format(k)]
    # BUG FIX: guard ``o.doc`` with ``o`` -- for a deprecated key that was
    # never registered, ``o`` is None and the previous bare ``o.doc`` raised
    # AttributeError (the ``if o:`` guard below already anticipates None).
    if o and o.doc:
        doc = '\n'.join(o.doc.strip().splitlines())
    else:
        doc = 'No description available.'
    buf.append(doc)
    if o:
        buf.append(
            '\n    [default: {}] [currently: {}]'.format(
                o.defval, _get_option(k, True)
            )
        )
    if d:
        buf.append(
            '\n    (Deprecated{})'.format(
                ', use `{}` instead.'.format(d.rkey) if d.rkey else ''
            )
        )
    buf.append('\n\n')
    return ''.join(buf)
def pp_options_list(keys: str, width: int = 80, _print: bool = False) -> str:
    """Build a concise listing of available options, grouped by prefix.

    Undotted keys are listed first on their own; dotted keys are grouped by
    their common prefix as ``- prefix.[leaf, leaf, ...]`` with wrapping at
    *width* columns.
    """
    from itertools import groupby
    from textwrap import wrap
    def format_group(prefix, leaves):
        lead = '- ' + prefix + '.[' if prefix else ''
        wrapped = wrap(
            ', '.join(leaves),
            width,
            initial_indent=lead,
            subsequent_indent=' ',
            break_long_words=False,
        )
        if wrapped and wrapped[-1] and prefix:
            wrapped[-1] = wrapped[-1] + ']'
        return wrapped
    lines = []
    simple = [k for k in sorted(keys) if '.' not in k]
    if simple:
        lines += format_group('', simple)
    dotted = [k for k in keys if '.' in k]
    for prefix, grouped in groupby(
        sorted(dotted), lambda k: k[: k.rfind('.')]
    ):
        leaves = [k[len(prefix) + 1 :] for k in grouped]
        lines += format_group(prefix, leaves)
    text = '\n'.join(lines)
    if _print:
        print(text)
    else:
        return text
# helpers
@contextmanager
def config_prefix(prefix):
    """Create a Context Manager for multiple invocations using a common prefix.
    Context Manager for multiple invocations of API with a common prefix
    supported API functions: (register / get / set )__option
    Warning: This is not thread - safe, and won't work properly if you import
    the API functions into your module using the "from x import y" construct.
    Examples
    --------
    import ibis.config as cf
    with cf.config_prefix("display.font"):
        cf.register_option("color", "red")
        cf.register_option("size", " 5 pt")
        cf.set_option(size, " 6 pt")
        cf.get_option(size)
        ...
        etc'
    will register options "display.font.color", "display.font.size", set the
    value of "display.font.size"... and so on.
    """
    # Note: reset_option relies on set_option, and on key directly
    # it does not fit in to this monkey-patching scheme
    global register_option, get_option, set_option, reset_option
    def wrap(func):
        def inner(key, *args, **kwds):
            pkey = '%s.%s' % (prefix, key)
            return func(pkey, *args, **kwds)
        return inner
    _register_option = register_option
    _get_option = get_option
    _set_option = set_option
    set_option = wrap(set_option)
    get_option = wrap(get_option)
    register_option = wrap(register_option)
    # BUG FIX: restore the un-prefixed API in a finally block.  Previously an
    # exception inside the `with` body skipped the restore, leaving the
    # module-level functions permanently wrapped with the prefix.
    try:
        yield None
    finally:
        set_option = _set_option
        get_option = _get_option
        register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type) -> Callable:
    """Create a validator requiring ``type(x)`` to be exactly *_type*.

    The returned function raises ValueError for any other type (including
    subclasses, since it compares types with ``==`` rather than using
    isinstance), and returns None on success.
    """
    def validator(value):
        """Raise ValueError unless *value* is exactly of the target type."""
        if type(value) != _type:
            raise ValueError("Value must have type '%s'" % str(_type))
    return validator
def is_instance_factory(_type) -> Callable:
    """Create a validator requiring ``isinstance(x, _type)``.

    *_type* may be a single type or a tuple/list of acceptable types.  The
    returned function raises ValueError on failure and returns None on
    success.
    """
    if isinstance(_type, (tuple, list)):
        accepted = tuple(_type)
        shown = "|".join(map(str, accepted))
    else:
        accepted = _type
        shown = "'%s'" % _type
    def validator(value):
        if not isinstance(value, accepted):
            raise ValueError("Value must be an instance of %s" % shown)
    return validator
def is_one_of_factory(legal_values: list) -> Callable:
    """Create a validator requiring membership in *legal_values*.

    Parameters
    ----------
    legal_values : list

    Returns
    -------
    validator : function
        Raises ValueError when called with a value not in *legal_values*;
        returns None on success.
    """
    def validator(candidate):
        """Raise ValueError when *candidate* is not among the legal values."""
        if candidate not in legal_values:
            rendered = "|".join(map(str, legal_values))
            raise ValueError("Value must be one of %s" % rendered)
    return validator
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_text = is_instance_factory((str, bytes))
|
swidbsk/freeopcua | refs/heads/master | tests/gtest/test/gtest_env_var_test.py | 2408 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError, after printing both values, unless expected == actual."""
  if expected != actual:
    # Parenthesized print behaves identically under Python 2 (a parenthesized
    # single expression) and makes this helper valid Python 3 as well; the
    # old bare `print` statements were Python-2-only.
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Remove the variable if present; no-op when it is already unset.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  # Build the argument vector; the flag is appended only when one is given.
  args = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  variable = 'GTEST_' + flag.upper()
  # Setting the env var should override the flag's default value...
  SetEnvVar(variable, test_val)
  AssertEq(test_val, GetFlag(flag))
  # ...and clearing it should restore the default.
  SetEnvVar(variable, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    # (flag, value set via env var, expected default) triples, exercised in
    # the same order as before.
    cases = [
        ('break_on_failure', '1', '0'),
        ('color', 'yes', 'auto'),
        ('filter', 'FooTest.Bar', '*'),
        ('output', 'xml:tmp/foo.xml', ''),
        ('print_time', '0', '1'),
        ('repeat', '999', '1'),
        ('throw_on_failure', '1', '0'),
        ('death_test_style', 'threadsafe', 'fast'),
        ('catch_exceptions', '0', '1'),
    ]
    # These two flags only exist on Linux builds.
    if IS_LINUX:
      cases.append(('death_test_use_fork', '1', '0'))
      cases.append(('stack_trace_depth', '0', '100'))
    for flag, test_val, default_val in cases:
      TestFlag(flag, test_val, default_val)
# Standard gtest test-script entry point.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
hiway/micropython | refs/heads/master | tests/extmod/ticks_diff.py | 37 | from utime import ticks_diff, ticks_add
MAX = ticks_add(0, -1)
# Should be done like this to avoid small int overflow
MODULO_HALF = MAX // 2 + 1
# Invariants:
# if ticks_diff(a, b) = c,
# then ticks_diff(b, a) = -c
assert ticks_diff(1, 0) == 1, ticks_diff(1, 0)
assert ticks_diff(0, 1) == -1
assert ticks_diff(0, MAX) == 1
assert ticks_diff(MAX, 0) == -1
assert ticks_diff(0, MAX - 1) == 2
# Maximum "positive" distance
assert ticks_diff(MODULO_HALF, 1) == MODULO_HALF - 1, ticks_diff(MODULO_HALF, 1)
# Step further, and it becomes a negative distance
assert ticks_diff(MODULO_HALF, 0) == -MODULO_HALF
# Offsetting that in either direction doesn't affect the result
off = 100
# Cheating and skipping to use ticks_add() when we know there's no wraparound
# Real apps should use always it.
assert ticks_diff(MODULO_HALF + off, 1 + off) == MODULO_HALF - 1
assert ticks_diff(MODULO_HALF + off, 0 + off) == -MODULO_HALF
assert ticks_diff(MODULO_HALF - off, ticks_add(1, -off)) == MODULO_HALF - 1
assert ticks_diff(MODULO_HALF - off, ticks_add(0, -off)) == -MODULO_HALF
print("OK")
|
fosdick/weaver | refs/heads/master | filebin.py | 1 | with open("decimal-strings/sqrt5file.txt", "rb") as binary_file:
st = binary_file.read()
str = ' '.join(format(ord(x), 'b') for x in st)
print(str)
|
Teino1978-Corp/pre-commit | refs/heads/master | testing/fixtures.py | 4 | from __future__ import absolute_import
from __future__ import unicode_literals
import io
import os.path
from aspy.yaml import ordered_dump
import pre_commit.constants as C
from pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA
from pre_commit.clientlib.validate_config import validate_config_extra
from pre_commit.clientlib.validate_manifest import load_manifest
from pre_commit.jsonschema_extensions import apply_defaults
from pre_commit.ordereddict import OrderedDict
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from testing.util import copy_tree_to_path
from testing.util import get_head_sha
from testing.util import get_resource_path
def git_dir(tmpdir_factory):
    """Create an empty git repository in a fresh temp dir; return its path."""
    repo_path = tmpdir_factory.get()
    with cwd(repo_path):
        cmd_output('git', 'init')
    return repo_path
def make_repo(tmpdir_factory, repo_source):
    """Copy the repo_source fixture into a fresh git repo and commit it."""
    repo_path = git_dir(tmpdir_factory)
    copy_tree_to_path(get_resource_path(repo_source), repo_path)
    with cwd(repo_path):
        cmd_output('git', 'add', '.')
        cmd_output('git', 'commit', '-m', 'Add hooks')
    return repo_path
def config_with_local_hooks():
    """Return a config dict describing a single local pcre hook."""
    # Build the hook entry key-by-key so insertion order (and therefore the
    # serialized YAML order) matches the documented layout.
    hook = OrderedDict()
    hook['id'] = 'do_not_commit'
    hook['name'] = 'Block if "DO NOT COMMIT" is found'
    hook['entry'] = 'DO NOT COMMIT'
    hook['language'] = 'pcre'
    hook['files'] = '^(.*)$'
    config = OrderedDict()
    config['repo'] = 'local'
    config['hooks'] = [hook]
    return config
def make_config_from_repo(repo_path, sha=None, hooks=None, check=True):
    """Build a config entry for the repo at repo_path.

    Defaults the sha to the repo's HEAD and the hooks to everything listed
    in the repo's manifest; validates the result unless check is False.
    """
    manifest = load_manifest(os.path.join(repo_path, C.MANIFEST_FILE))
    if not hooks:
        # default: one entry per hook id found in the manifest
        hooks = [OrderedDict((('id', hook['id']),)) for hook in manifest]
    config = OrderedDict()
    config['repo'] = repo_path
    config['sha'] = sha or get_head_sha(repo_path)
    config['hooks'] = hooks
    if not check:
        return config
    wrapped_config = apply_defaults([config], CONFIG_JSON_SCHEMA)
    validate_config_extra(wrapped_config)
    return wrapped_config[0]
def write_config(directory, config):
    """Serialize `config` (a dict or list of dicts) to the pre-commit config
    file inside `directory`."""
    if type(config) is not list:
        # a single config dict is allowed; normalize it to a one-element list
        assert type(config) is OrderedDict
        config = [config]
    config_path = os.path.join(directory, C.CONFIG_FILE)
    with io.open(config_path, 'w') as config_file:
        config_file.write(ordered_dump(config, **C.YAML_DUMP_KWARGS))
def add_config_to_repo(git_path, config):
    """Write `config` into the repo at `git_path` and commit it.

    Returns `git_path` so the call can be chained.
    """
    write_config(git_path, config)
    with cwd(git_path):
        cmd_output('git', 'add', C.CONFIG_FILE)
        cmd_output('git', 'commit', '-m', 'Add hooks config')
    return git_path
def make_consuming_repo(tmpdir_factory, repo_source):
    """Create a hook repo from repo_source plus a second repo whose committed
    config consumes it."""
    source_path = make_repo(tmpdir_factory, repo_source)
    config = make_config_from_repo(source_path)
    consumer_path = git_dir(tmpdir_factory)
    return add_config_to_repo(consumer_path, config)
|
soumide1102/pycbc | refs/heads/master | pycbc/io/inference_hdf.py | 1 | # Copyright (C) 2016 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This modules defines functions for reading and writing samples that the
inference samplers generate.
"""
import os
import sys
import h5py
import numpy
import logging
from pycbc import DYN_RANGE_FAC
from pycbc.types import FrequencySeries
from pycbc.waveform import parameters as wfparams
import pycbc.inference.sampler
import pycbc.inference.likelihood
from pycbc.io import FieldArray
class _PosteriorOnlyParser(object):
    """Provides interface for reading/writing samples from/to an InferenceFile
    that contains flattened posterior samples.
    """
    @staticmethod
    def _read_fields(fp, fields_group, fields, array_class,
                     thin_start=None, thin_interval=None, thin_end=None,
                     iteration=None):
        """Reads fields from the given file.
        """
        # a single iteration index takes precedence over a thinning slice
        if iteration is not None:
            get_index = iteration
        else:
            get_index = fp.get_slice(thin_start=thin_start, thin_end=thin_end,
                                     thin_interval=thin_interval)
        # load
        arrays = {}
        group = fields_group + '/{}'
        arrays = {field: fp[group.format(field)][get_index]
                  for field in fields}
        return array_class.from_kwargs(**arrays)
    @classmethod
    def read_samples(cls, fp, parameters, samples_group=None,
                     thin_start=0, thin_end=None, thin_interval=1,
                     iteration=None, array_class=None):
        """Reads posterior samples from a posterior-only file.
        """
        # get the group to load from
        if samples_group is None:
            samples_group = fp.samples_group
        # get the type of array class to use
        if array_class is None:
            array_class = FieldArray
        # get the names of fields needed for the given parameters
        possible_fields = fp[samples_group].keys()
        loadfields = array_class.parse_parameters(parameters, possible_fields)
        return cls._read_fields(fp, samples_group, loadfields, array_class,
                                thin_start=thin_start,
                                thin_interval=thin_interval, thin_end=thin_end,
                                iteration=iteration)
    @staticmethod
    def write_samples_group(fp, samples_group, fields, samples):
        """Writes the given samples to the given samples group.
        """
        # one flat dataset per field, directly under samples_group
        for field in samples.fieldnames:
            grp = '{}/{}'.format(samples_group, field)
            fp[grp] = samples[field]
    @classmethod
    def n_independent_samples(cls, fp):
        """Returns the number of independent samples stored in the file.
        """
        # samples are flattened, so any one parameter's size is the count
        return cls.read_samples(fp, fp.variable_args[0]).size
class InferenceFile(h5py.File):
    """ A subclass of the h5py.File object that has extra functions for
    handling reading and writing the samples from the samplers.
    Parameters
    -----------
    path : str
        The path to the HDF file.
    mode : {None, str}
        The mode to open the file, eg. "w" for write and "r" for read.
    """
    name = "hdf"
    # names of the HDF groups used to store the various data products
    samples_group = 'samples'
    stats_group = 'likelihood_stats'
    sampler_group = 'sampler_states'
    def __init__(self, path, mode=None, **kwargs):
        """Opens the file; all arguments are passed through to h5py.File."""
        super(InferenceFile, self).__init__(path, mode, **kwargs)
    @property
    def posterior_only(self):
        """Whether the file only contains flattened posterior samples.
        """
        try:
            return self.attrs['posterior_only']
        except KeyError:
            # attribute was never written; assume full (non-flattened) samples
            return False
    @property
    def sampler_name(self):
        """Returns the name of the sampler that was used."""
        return self.attrs["sampler"]
    @property
    def sampler_class(self):
        """Returns the sampler class that was used."""
        try:
            sampler = self.sampler_name
        except KeyError:
            return None
        return pycbc.inference.sampler.samplers[sampler]
    @property
    def samples_parser(self):
        """Returns the class to use to read/write samples from/to the file."""
        if self.posterior_only:
            return _PosteriorOnlyParser
        else:
            # the sampler class knows how to parse its own sample layout
            return self.sampler_class
    @property
    def likelihood_eval_name(self):
        """Returns the name of the likelihood evaluator that was used."""
        return self.attrs["likelihood_evaluator"]
    @property
    def variable_args(self):
        """Returns list of variable_args.
        Returns
        -------
        variable_args : {list, str}
            List of str that contain variable_args keys.
        """
        return self.attrs["variable_args"]
    @property
    def static_args(self):
        """Returns a dictionary of the static_args. The keys are the argument
        names, values are the value they were set to.
        """
        return dict([[arg, self.attrs[arg]]
                     for arg in self.attrs["static_args"]])
    @property
    def sampling_args(self):
        """Returns the parameters that were used to sample.
        Returns
        -------
        sampling_args : {list, str}
            List of the sampling args.
        """
        return self.attrs["sampling_args"]
    @property
    def lognl(self):
        """Returns the log noise likelihood."""
        return self.attrs["lognl"]
    @property
    def niterations(self):
        """Returns number of iterations performed.
        Returns
        -------
        niterations : int
            Number of iterations performed.
        """
        return self.attrs["niterations"]
    @property
    def n_independent_samples(self):
        """Returns the number of independent samples stored in the file.
        """
        return self.samples_parser.n_independent_samples(self)
    @property
    def burn_in_iterations(self):
        """Returns number of iterations in the burn in.
        """
        try:
            return self.attrs["burn_in_iterations"]
        except KeyError:
            # wasn't written; assume the last
            return self.niterations
    @property
    def is_burned_in(self):
        """Returns whether or not the sampler is burned in.
        """
        try:
            return self.attrs["is_burned_in"]
        except KeyError:
            # wasn't written; assume False
            return False
    @property
    def nwalkers(self):
        """Returns number of walkers used.
        Returns
        -------
        nwalkesr : int
            Number of walkers used.
        """
        return self.attrs["nwalkers"]
    @property
    def ntemps(self):
        """Returns number of temperatures used."""
        return self.attrs["ntemps"]
    @property
    def acl(self):
        """ Returns the saved autocorelation length (ACL).
        Returns
        -------
        acl : {int, float}
            The ACL.
        """
        return self.attrs["acl"]
    @property
    def cmd(self):
        """Returns the (last) saved command line.
        If the file was created from a run that resumed from a checkpoint, only
        the last command line used is returned.
        Returns
        -------
        cmd : string
            The command line that created this InferenceFile.
        """
        cmd = self.attrs["cmd"]
        if isinstance(cmd, numpy.ndarray):
            # multiple command lines were stored; the most recent is last
            cmd = cmd[-1]
        return cmd
    @property
    def resume_points(self):
        """The iterations at which a run was resumed from checkpoint.
        Returns
        -------
        resume_points : array or None
            An array of integers giving the points at which the run resumed.
        Raises
        ------
        KeyError
            If the run never resumed from a checkpoint.
        """
        return self.attrs['resume_points']
    @property
    def log_evidence(self):
        """Returns the log of the evidence and its error, if they exist in the
        file. Raises a KeyError otherwise.
        """
        return self.attrs["log_evidence"], self.attrs["dlog_evidence"]
    def read_samples(self, parameters, samples_group=None, **kwargs):
        """Reads samples from the file.
        Parameters
        -----------
        parameters : (list of) strings
            The parameter(s) to retrieve. A parameter can be the name of any
            field in `samples_group`, a virtual field or method of
            `FieldArray` (as long as the file contains the necessary fields
            to derive the virtual field or method), and/or a function of
            these.
        samples_group : str
            Group in HDF InferenceFile that parameters belong to.
        \**kwargs :
            The rest of the keyword args are passed to the sampler's
            `read_samples` method.
        Returns
        -------
        FieldArray
            Samples for the given parameters, as an instance of a
            FieldArray.
        """
        # get the appropriate sampler class
        samples_group = samples_group if samples_group else self.samples_group
        return self.samples_parser.read_samples(self, parameters,
                                                samples_group=samples_group,
                                                **kwargs)
    def read_likelihood_stats(self, **kwargs):
        """Reads likelihood stats from self.
        Parameters
        -----------
        \**kwargs :
            The keyword args are passed to the sampler's `read_likelihood_stats`
            method.
        Returns
        -------
        stats : {FieldArray, None}
            Likelihood stats in the file, as a FieldArray. The fields of the
            array are the names of the stats that are in the `likelihood_stats`
            group.
        """
        parameters = self[self.stats_group].keys()
        return self.read_samples(parameters, samples_group=self.stats_group,
                                 **kwargs)
    def read_acceptance_fraction(self, **kwargs):
        """Returns the acceptance fraction that was written to the file.
        Parameters
        ----------
        \**kwargs :
            All keyword arguments are passed to the sampler's
            `read_acceptance_fraction` function.
        Returns
        -------
        numpy.array
            The acceptance fraction.
        """
        return self.sampler_class.read_acceptance_fraction(self, **kwargs)
    def read_acls(self):
        """Returns all of the individual chains' acls. See the `read_acls`
        function of this file's sampler for more details.
        """
        return self.sampler_class.read_acls(self)
    def read_label(self, parameter, error_on_none=False):
        """Returns the label for the parameter.
        Parameters
        -----------
        parameter : str
            Name of parameter to get a label for. Will first try to retrieve
            a label from this file's "label" attributes. If the parameter
            is not found there, will look for a label from
            pycbc.waveform.parameters.
        error_on_none : {False, bool}
            If True, will raise a ValueError if a label cannot be found, or if
            the label is None. Otherwise, the parameter will just be returned
            if no label can be found.
        Returns
        -------
        label : str
            A formatted string for the name of the paramter.
        """
        # get label
        try:
            label = self[parameter].attrs["label"]
        except KeyError:
            # try looking in pycbc.waveform.parameters
            try:
                label = getattr(wfparams, parameter).label
            except AttributeError:
                label = None
        if label is None:
            if error_on_none:
                raise ValueError("Cannot find a label for paramter %s" %(
                    parameter))
            else:
                return parameter
        return label
    def read_random_state(self, group=None):
        """ Reads the state of the random number generator from the file.
        Parameters
        ----------
        group : str
            Name of group to read random state from.
        Returns
        -------
        tuple
            A tuple with 5 elements that can be passed to numpy.set_state.
        """
        group = self.sampler_group if group is None else group
        dataset_name = "/".join([group, "random_state"])
        # the state array is stored as the dataset; the scalar pieces of the
        # state are stored as attrs on that dataset
        arr = self[dataset_name][:]
        s = self[dataset_name].attrs["s"]
        pos = self[dataset_name].attrs["pos"]
        has_gauss = self[dataset_name].attrs["has_gauss"]
        cached_gauss = self[dataset_name].attrs["cached_gauss"]
        return s, arr, pos, has_gauss, cached_gauss
    def write_strain(self, strain_dict, group=None):
        """Writes strain for each IFO to file.
        Parameters
        -----------
        strain : {dict, FrequencySeries}
            A dict of FrequencySeries where the key is the IFO.
        group : {None, str}
            The group to write the strain to. If None, will write to the top
            level.
        """
        subgroup = "{ifo}/strain"
        if group is None:
            group = subgroup
        else:
            group = '/'.join([group, subgroup])
        for ifo,strain in strain_dict.items():
            self[group.format(ifo=ifo)] = strain
            self[group.format(ifo=ifo)].attrs['delta_t'] = strain.delta_t
            self[group.format(ifo=ifo)].attrs['start_time'] = \
                float(strain.start_time)
    def write_stilde(self, stilde_dict, group=None):
        """Writes stilde for each IFO to file.
        Parameters
        -----------
        stilde : {dict, FrequencySeries}
            A dict of FrequencySeries where the key is the IFO.
        group : {None, str}
            The group to write the strain to. If None, will write to the top
            level.
        """
        subgroup = "{ifo}/stilde"
        if group is None:
            group = subgroup
        else:
            group = '/'.join([group, subgroup])
        for ifo,stilde in stilde_dict.items():
            self[group.format(ifo=ifo)] = stilde
            self[group.format(ifo=ifo)].attrs['delta_f'] = stilde.delta_f
            self[group.format(ifo=ifo)].attrs['epoch'] = float(stilde.epoch)
    def write_psd(self, psds, low_frequency_cutoff, group=None):
        """Writes PSD for each IFO to file.
        Parameters
        -----------
        psds : {dict, FrequencySeries}
            A dict of FrequencySeries where the key is the IFO.
        low_frequency_cutoff : {dict, float}
            A dict of the low-frequency cutoff where the key is the IFO. The
            minimum value will be stored as an attr in the File.
        group : {None, str}
            The group to write the strain to. If None, will write to the top
            level.
        """
        subgroup = "{ifo}/psds/0"
        if group is None:
            group = subgroup
        else:
            group = '/'.join([group, subgroup])
        # only the minimum cutoff across IFOs is recorded, as a file attr
        self.attrs["low_frequency_cutoff"] = min(low_frequency_cutoff.values())
        for ifo in psds:
            self[group.format(ifo=ifo)] = psds[ifo]
            self[group.format(ifo=ifo)].attrs['delta_f'] = psds[ifo].delta_f
    def write_data(self, strain_dict=None, stilde_dict=None,
                   psd_dict=None, low_frequency_cutoff_dict=None,
                   group=None):
        """Writes the strain/stilde/psd.
        Parameters
        ----------
        strain_dict : {None, dict}
            A dictionary of strains. If None, no strain will be written.
        stilde_dict : {None, dict}
            A dictionary of stilde. If None, no stilde will be written.
        psd_dict : {None, dict}
            A dictionary of psds. If None, no psds will be written.
        low_freuency_cutoff_dict : {None, dict}
            A dictionary of low frequency cutoffs used for each detector in
            `psd_dict`; must be provided if `psd_dict` is not None.
        group : {None, str}
            The group to write the strain to. If None, will write to the top
            level.
        """
        # save PSD
        if psd_dict is not None:
            if low_frequency_cutoff_dict is None:
                raise ValueError("must provide low_frequency_cutoff_dict if "
                                 "saving psds to output")
            # apply dynamic range factor for saving PSDs since
            # plotting code expects it
            psd_dyn_dict = {}
            # NOTE: dict.iteritems is Python 2 only; this module predates a
            # Python 3 port
            for key,val in psd_dict.iteritems():
                psd_dyn_dict[key] = FrequencySeries(val*DYN_RANGE_FAC**2,
                                                    delta_f=val.delta_f)
            self.write_psd(psds=psd_dyn_dict,
                           low_frequency_cutoff=low_frequency_cutoff_dict,
                           group=group)
        # save stilde
        if stilde_dict is not None:
            self.write_stilde(stilde_dict, group=group)
        # save strain if desired
        if strain_dict is not None:
            self.write_strain(strain_dict, group=group)
    def write_injections(self, injection_file, ifo):
        """ Writes injection parameters for an IFO to file.
        Parameters
        ----------
        injection_file : str
            Path to HDF injection file.
        ifo : str
            IFO name.
        """
        subgroup = "{ifo}/injections"
        self.create_group(subgroup.format(ifo=ifo))
        try:
            # copy every dataset and attr from the injection file verbatim
            with h5py.File(injection_file, "r") as fp:
                for param in fp.keys():
                    self[subgroup.format(ifo=ifo)][param] = fp[param][:]
                for key in fp.attrs.keys():
                    self[subgroup.format(ifo=ifo)].attrs[key] = fp.attrs[key]
        except IOError:
            logging.warn("Could not read %s as an HDF file", injection_file)
    def write_command_line(self):
        """Writes command line to attributes.
        The command line is written to the file's ``attrs['cmd']``. If this
        attribute already exists in the file (this can happen when resuming
        from a checkpoint), ``attrs['cmd']`` will be a list storing the current
        command line and all previous command lines.
        """
        cmd = [" ".join(sys.argv)]
        try:
            previous = self.attrs["cmd"]
            if isinstance(previous, str):
                # convert to list
                previous = [previous]
            elif isinstance(previous, numpy.ndarray):
                previous = previous.tolist()
        except KeyError:
            previous = []
        # newest command line first
        self.attrs["cmd"] = cmd + previous
    def write_resume_point(self):
        """Keeps a list of the number of iterations that were in a file when a
        run was resumed from a checkpoint."""
        try:
            resume_pts = self.attrs["resume_points"].tolist()
        except KeyError:
            resume_pts = []
        try:
            niterations = self.niterations
        except KeyError:
            niterations = 0
        resume_pts.append(niterations)
        self.attrs["resume_points"] = resume_pts
    def write_random_state(self, group=None, state=None):
        """ Writes the state of the random number generator from the file.
        Parameters
        ----------
        group : str
            Name of group to read random state to.
        state : tuple, optional
            Specify the random state to write. If None, will use
            ``numpy.random.get_state()``.
        """
        group = self.sampler_group if group is None else group
        dataset_name = "/".join([group, "random_state"])
        if state is None:
            state = numpy.random.get_state()
        s, arr, pos, has_gauss, cached_gauss = state
        if group in self:
            # dataset already exists; overwrite its contents in place
            self[dataset_name][:] = arr
        else:
            self.create_dataset(dataset_name, arr.shape, fletcher32=True,
                                dtype=arr.dtype)
            self[dataset_name][:] = arr
        self[dataset_name].attrs["s"] = s
        self[dataset_name].attrs["pos"] = pos
        self[dataset_name].attrs["has_gauss"] = has_gauss
        self[dataset_name].attrs["cached_gauss"] = cached_gauss
    def get_slice(self, thin_start=None, thin_interval=None, thin_end=None):
        """Formats a slice using the given arguments that can be used to
        retrieve a thinned array from an InferenceFile.
        Parameters
        ----------
        thin_start : {None, int}
            The starting index to use. If None, will try to retrieve the
            `burn_in_iterations` from the given file. If no
            `burn_in_iterations` exists, will default to the start of the
            array.
        thin_interval : {None, int}
            The interval to use. If None, will try to retrieve the acl from the
            given file. If no acl attribute exists, will default to 1.
        thin_end : {None, int}
            The end index to use. If None, will retrieve to the end of the
            array.
        Returns
        -------
        slice :
            The slice needed.
        """
        # default is to skip burn in samples
        if thin_start is None:
            try:
                thin_start = self.burn_in_iterations
                # if the sampler hasn't burned in, the burn_in_iterations will
                # be the same as the number of iterations, which would result
                # in 0 samples. In that case, just use the last one
                if thin_start == self.niterations:
                    thin_start = thin_start - 1
            except KeyError:
                pass
        # default is to use stored ACL and accept every i-th sample
        if thin_interval is None:
            try:
                thin_interval = int(numpy.ceil(self.acl))
            except KeyError:
                pass
        return slice(thin_start, thin_end, thin_interval)
    def copy_metadata(self, other):
        """Copies all metadata from this file to the other file.
        Metadata is defined as all data that is not in either the samples or
        stats group.
        Parameters
        ----------
        other : InferenceFile
            An open inference file to write the data to.
        """
        logging.info("Copying metadata")
        # copy non-samples/stats data
        for key in self.keys():
            if key not in [self.samples_group, self.stats_group]:
                super(InferenceFile, self).copy(key, other)
        # copy attributes
        for key in self.attrs.keys():
            other.attrs[key] = self.attrs[key]
    def copy(self, other, parameters=None, parameter_names=None,
             posterior_only=False, **kwargs):
        """Copies data in this file to another file.
        The samples and stats to copy may be down selected using the given
        kwargs. All other data (the "metadata") are copied exactly.
        Parameters
        ----------
        other : str or InferenceFile
            The file to write to. May be either a string giving a filename,
            or an open hdf file. If the former, the file will be opened with
            the write attribute (note that if a file already exists with that
            name, it will be deleted).
        parameters : list of str, optional
            List of parameters to copy. If None, will copy all parameters.
        parameter_names : dict, optional
            Rename one or more parameters to the given name. The dictionary
            should map parameter -> parameter name. If None, will just use the
            original parameter names.
        posterior_only : bool, optional
            Write the samples and likelihood stats as flattened arrays, and
            set other's posterior_only attribute. For example, if this file
            has a parameter's samples written to
            `{samples_group}/{param}/walker{x}`, then other will have all of
            the selected samples from all walkers written to
            `{samples_group}/{param}/`.
        \**kwargs :
            All other keyword arguments are passed to `read_samples`.
        Returns
        -------
        InferenceFile
            The open file handler to other.
        """
        if not isinstance(other, h5py.File):
            # check that we're not trying to overwrite this file
            if other == self.name:
                raise IOError("destination is the same as this file")
            other = InferenceFile(other, 'w')
        # copy metadata over
        self.copy_metadata(other)
        # update other's posterior attribute
        if posterior_only:
            other.attrs['posterior_only'] = posterior_only
        # select the samples to copy
        logging.info("Reading samples to copy")
        if parameters is None:
            parameters = self.variable_args
        # if list of desired parameters is different, rename variable args
        if set(parameters) != set(self.variable_args):
            other.attrs['variable_args'] = parameters
        # if only the posterior is desired, we'll flatten the results
        if not posterior_only and not self.posterior_only:
            kwargs['flatten'] = False
        samples = self.read_samples(parameters, **kwargs)
        logging.info("Copying {} samples".format(samples.size))
        # if different parameter names are desired, get them from the samples
        if parameter_names:
            arrs = {pname: samples[p] for p,pname in parameter_names.items()}
            arrs.update({p: samples[p] for p in parameters
                         if p not in parameter_names})
            samples = FieldArray.from_kwargs(**arrs)
            other.attrs['variable_args'] = samples.fieldnames
        logging.info("Writing samples")
        other.samples_parser.write_samples_group(other, self.samples_group,
                                                 samples.fieldnames, samples)
        # do the same for the likelihood stats
        logging.info("Reading stats to copy")
        stats = self.read_likelihood_stats(**kwargs)
        logging.info("Writing stats")
        other.samples_parser.write_samples_group(other, self.stats_group,
                                                 stats.fieldnames, stats)
        # if any down selection was done, re-set the burn in iterations and
        # the acl, and the niterations.
        # The last dimension of the samples returned by the sampler should
        # be the number of iterations.
        if samples.shape[-1] != self.niterations:
            other.attrs['acl'] = 1
            other.attrs['burn_in_iterations'] = 0
            other.attrs['niterations'] = samples.shape[-1]
        return other
def check_integrity(filename):
    """Checks the integrity of an InferenceFile.
    Checks done are:
        * can the file open?
        * do all of the datasets in the samples group have the same shape?
        * can the first and last sample in all of the datasets in the samples
          group be read?
    If any of these checks fail, an IOError is raised.
    Parameters
    ----------
    filename: str
        Name of an InferenceFile to check.
    Raises
    ------
    ValueError
        If the given file does not exist.
    KeyError
        If the samples group does not exist.
    IOError
        If any of the checks fail.
    """
    # check that the file exists
    if not os.path.exists(filename):
        raise ValueError("file {} does not exist".format(filename))
    # if the file is corrupted such that it cannot be opened, the next line
    # will raise an IOError
    with InferenceFile(filename, 'r') as fp:
        # check that all datasets in samples have the same shape
        # NOTE(review): indexing parameters[0] assumes .keys() returns an
        # indexable list (Python 2 / old h5py) — verify under Python 3
        parameters = fp[fp.samples_group].keys()
        group = fp.samples_group + '/{}'
        # use the first parameter as a reference shape
        ref_shape = fp[group.format(parameters[0])].shape
        if not all(fp[group.format(param)].shape == ref_shape
                   for param in parameters):
            raise IOError("not all datasets in the samples group have the same "
                          "shape")
        # check that we can read the first/last sample
        firstidx = tuple([0]*len(ref_shape))
        lastidx = tuple([-1]*len(ref_shape))
        for param in parameters:
            _ = fp[group.format(param)][firstidx]
            _ = fp[group.format(param)][lastidx]
|
dcroc16/skunk_works | refs/heads/master | google_appengine/lib/django-1.5/tests/modeltests/get_or_create/models.py | 53 | """
33. get_or_create()
``get_or_create()`` does what it says: it tries to look up an object with the
given parameters. If an object isn't found, it creates one with the given
parameters.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
    # Basic person record used by the get_or_create() tests.
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    birthday = models.DateField()
    def __str__(self):
        """Return the person's full name."""
        return '%s %s' % (self.first_name, self.last_name)
class ManualPrimaryKeyTest(models.Model):
    # Model with a manually assigned primary key, for exercising
    # get_or_create() when the caller supplies the pk explicitly.
    id = models.IntegerField(primary_key=True)
    data = models.CharField(max_length=100)
|
blampe/M2M | refs/heads/master | Markdown-2.0/markdown/extensions/toc.py | 17 | """
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
class TocTreeprocessor(markdown.treeprocessors.Treeprocessor):
    """Tree processor that replaces the TOC marker with a generated
    table-of-contents <div> and assigns ids/anchors to headers."""
    # Iterator wrapper to get parent and child all at once
    def iterparent(self, root):
        for parent in root.getiterator():
            for child in parent:
                yield parent, child
    def run(self, doc):
        """Scan the document tree, build the TOC div, and splice it in
        wherever the configured marker text appears."""
        div = etree.Element("div")
        div.attrib["class"] = "toc"
        last_li = None
        # Add title to the div
        if self.config["title"][0]:
            header = etree.SubElement(div, "span")
            header.attrib["class"] = "toctitle"
            header.text = self.config["title"][0]
        level = 0
        list_stack=[div]
        header_rgx = re.compile("[Hh][123456]")
        # Get a list of id attributes
        used_ids = []
        for c in doc.getiterator():
            if "id" in c.attrib:
                used_ids.append(c.attrib["id"])
        for (p, c) in self.iterparent(doc):
            if not c.text:
                continue
            # To keep the output from screwing up the
            # validation by putting a <div> inside of a <p>
            # we actually replace the <p> in its entirety.
            # We do not allow the marker inside a header as that
            # would cause an endless loop of placing a new TOC
            # inside previously generated TOC.
            if c.text.find(self.config["marker"][0]) > -1 and not header_rgx.match(c.tag):
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = div
                        break
            if header_rgx.match(c.tag):
                # header tags are "h1".."h6"; the trailing digit is the level
                tag_level = int(c.tag[-1])
                # Regardless of how many levels we jumped
                # only one list should be created, since
                # empty lists containing lists are illegal.
                if tag_level < level:
                    list_stack.pop()
                    level = tag_level
                if tag_level > level:
                    newlist = etree.Element("ul")
                    if last_li:
                        last_li.append(newlist)
                    else:
                        list_stack[-1].append(newlist)
                    list_stack.append(newlist)
                    level = tag_level
                # Do not override pre-existing ids
                if not "id" in c.attrib:
                    id = self.config["slugify"][0](c.text)
                    # de-duplicate by appending _1, _2, ... suffixes
                    if id in used_ids:
                        ctr = 1
                        while "%s_%d" % (id, ctr) in used_ids:
                            ctr += 1
                        id = "%s_%d" % (id, ctr)
                    used_ids.append(id)
                    c.attrib["id"] = id
                else:
                    id = c.attrib["id"]
                # List item link, to be inserted into the toc div
                last_li = etree.Element("li")
                link = etree.SubElement(last_li, "a")
                link.text = c.text
                link.attrib["href"] = '#' + id
                if int(self.config["anchorlink"][0]):
                    # also turn the header itself into a self-link
                    anchor = etree.SubElement(c, "a")
                    anchor.text = c.text
                    anchor.attrib["href"] = "#" + id
                    anchor.attrib["class"] = "toclink"
                    c.text = ""
                list_stack[-1].append(last_li)
class TocExtension(markdown.Extension):
    """Markdown extension that wires a TocTreeprocessor into the pipeline."""
    def __init__(self, configs):
        # each config entry is [default value, help text]
        self.config = { "marker" : ["[TOC]",
                            "Text to find and replace with Table of Contents -"
                            "Defaults to \"[TOC]\""],
                        "slugify" : [self.slugify,
                            "Function to generate anchors based on header text-"
                            "Defaults to a built in slugify function."],
                        "title" : [None,
                            "Title to insert into TOC <div> - "
                            "Defaults to None"],
                        "anchorlink" : [0,
                            "1 if header should be a self link"
                            "Defaults to 0"]}
        for key, value in configs:
            self.setConfig(key, value)
    # This is exactly the same as Django's slugify
    def slugify(self, value):
        """ Slugify a string, to make it URL friendly. """
        import unicodedata
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
        # NOTE: the `unicode` builtin makes this Python 2 only
        value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
        return re.sub('[-\s]+','-',value)
    def extendMarkdown(self, md, md_globals):
        """Register the TOC tree processor at the start of the tree stage."""
        tocext = TocTreeprocessor(md)
        tocext.config = self.config
        md.treeprocessors.add("toc", tocext, "_begin")
def makeExtension(configs=None):
    """Entry point used by Markdown to instantiate the TOC extension.

    Parameters
    ----------
    configs : iterable of (key, value) pairs, optional
        Settings forwarded to :class:`TocExtension`. Defaults to an empty
        dict. (A ``None`` sentinel replaces the previous mutable default
        argument ``{}``, which was shared across all calls.)
    """
    if configs is None:
        configs = {}
    return TocExtension(configs=configs)
|
sgubianpm/pygensa | refs/heads/master | sdaopt/benchmark/go_benchmark_functions/go_funcs_F.py | 47 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from .go_benchmark import Benchmark
class FreudensteinRoth(Benchmark):
    r"""
    FreudensteinRoth objective function.

    Two-dimensional multimodal minimization problem due to
    Freudenstein & Roth [1]_:

    .. math::

        f_{\text{FreudensteinRoth}}(x) = \left\{x_1 - 13 + \left[(5 - x_2) x_2
        - 2 \right] x_2 \right\}^2 + \left \{x_1 - 29
        + \left[(x_2 + 1) x_2 - 14 \right] x_2 \right\}^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [5, 4]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """
    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Search domain is the square [-10, 10]^N; the custom bounds are
        # used only for plotting.
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-3, 3), (-5, 5)]
        self.global_optimum = [[5.0, 4.0]]
        self.fglob = 0.0
    def fun(self, x, *args):
        self.nfev += 1
        u = x[1]
        # Sum of two squared residuals; both vanish at x = [5, 4].
        residual_a = x[0] - 13.0 + ((5.0 - u) * u - 2.0) * u
        residual_b = x[0] - 29.0 + ((u + 1.0) * u - 14.0) * u
        return residual_a ** 2 + residual_b ** 2
|
weidongxu84/info-gatherer | refs/heads/master | django/contrib/comments/forms.py | 92 | import time
from django import forms
from django.forms.util import ErrorDict
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.comments.models import Comment
from django.utils.crypto import salted_hmac, constant_time_compare
from django.utils.encoding import force_unicode
from django.utils.text import get_text_list
from django.utils import timezone
from django.utils.translation import ungettext, ugettext, ugettext_lazy as _
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH', 3000)
class CommentSecurityForm(forms.Form):
    """
    Handles the security aspects (anti-spoofing) for comment forms.

    The form carries the target object's content type and primary key, a
    creation timestamp, and an HMAC over those three values. On submission
    the hash and timestamp are re-checked so the hidden fields cannot be
    tampered with or replayed long after the form was rendered.
    """
    content_type = forms.CharField(widget=forms.HiddenInput)
    object_pk = forms.CharField(widget=forms.HiddenInput)
    timestamp = forms.IntegerField(widget=forms.HiddenInput)
    # Exact length 40 matches the hex digest returned by
    # generate_security_hash() below.
    security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput)
    def __init__(self, target_object, data=None, initial=None):
        self.target_object = target_object
        if initial is None:
            initial = {}
        # Seed the hidden fields (content type, pk, timestamp, hash) so
        # the rendered form is verifiable on the way back in.
        initial.update(self.generate_security_data())
        super(CommentSecurityForm, self).__init__(data=data, initial=initial)
    def security_errors(self):
        """Return just those errors associated with security"""
        errors = ErrorDict()
        for f in ["honeypot", "timestamp", "security_hash"]:
            if f in self.errors:
                errors[f] = self.errors[f]
        return errors
    def clean_security_hash(self):
        """Check the security hash."""
        # Recompute the hash from the *submitted* hidden fields; any
        # tampering with content_type/object_pk/timestamp invalidates it.
        security_hash_dict = {
            'content_type' : self.data.get("content_type", ""),
            'object_pk' : self.data.get("object_pk", ""),
            'timestamp' : self.data.get("timestamp", ""),
        }
        expected_hash = self.generate_security_hash(**security_hash_dict)
        actual_hash = self.cleaned_data["security_hash"]
        # constant_time_compare avoids leaking hash prefixes via timing.
        if not constant_time_compare(expected_hash, actual_hash):
            raise forms.ValidationError("Security hash check failed.")
        return actual_hash
    def clean_timestamp(self):
        """Make sure the timestamp isn't too far (> 2 hours) in the past."""
        ts = self.cleaned_data["timestamp"]
        if time.time() - ts > (2 * 60 * 60):
            raise forms.ValidationError("Timestamp check failed")
        return ts
    def generate_security_data(self):
        """Generate a dict of security data for "initial" data."""
        timestamp = int(time.time())
        security_dict = {
            'content_type' : str(self.target_object._meta),
            'object_pk' : str(self.target_object._get_pk_val()),
            'timestamp' : str(timestamp),
            'security_hash' : self.initial_security_hash(timestamp),
        }
        return security_dict
    def initial_security_hash(self, timestamp):
        """
        Generate the initial security hash from self.content_object
        and a (unix) timestamp.
        """
        initial_security_dict = {
            'content_type' : str(self.target_object._meta),
            'object_pk' : str(self.target_object._get_pk_val()),
            'timestamp' : str(timestamp),
        }
        return self.generate_security_hash(**initial_security_dict)
    def generate_security_hash(self, content_type, object_pk, timestamp):
        """
        Generate a HMAC security hash from the provided info.
        """
        # The three fields are joined with '-' in a fixed order; the same
        # ordering must be used when verifying in clean_security_hash().
        info = (content_type, object_pk, timestamp)
        key_salt = "django.contrib.forms.CommentSecurityForm"
        value = "-".join(info)
        return salted_hmac(key_salt, value).hexdigest()
class CommentDetailsForm(CommentSecurityForm):
    """
    Handles the specific details of the comment (name, comment, etc.).
    """
    name = forms.CharField(label=_("Name"), max_length=50)
    email = forms.EmailField(label=_("Email address"))
    url = forms.URLField(label=_("URL"), required=False)
    comment = forms.CharField(label=_('Comment'), widget=forms.Textarea,
                              max_length=COMMENT_MAX_LENGTH)
    def get_comment_object(self):
        """
        Return a new (unsaved) comment object based on the information in this
        form. Assumes that the form is already validated and will throw a
        ValueError if not.

        Does not set any of the fields that would come from a Request object
        (i.e. ``user`` or ``ip_address``).
        """
        if not self.is_valid():
            raise ValueError("get_comment_object may only be called on valid forms")
        CommentModel = self.get_comment_model()
        new = CommentModel(**self.get_comment_create_data())
        # Re-use an identical pre-existing comment instead of saving a dup.
        new = self.check_for_duplicate_comment(new)
        return new
    def get_comment_model(self):
        """
        Get the comment model to create with this form. Subclasses in custom
        comment apps should override this, get_comment_create_data, and perhaps
        check_for_duplicate_comment to provide custom comment models.
        """
        return Comment
    def get_comment_create_data(self):
        """
        Returns the dict of data to be used to create a comment. Subclasses in
        custom comment apps that override get_comment_model can override this
        method to add extra fields onto a custom comment model.
        """
        return dict(
            content_type = ContentType.objects.get_for_model(self.target_object),
            object_pk = force_unicode(self.target_object._get_pk_val()),
            user_name = self.cleaned_data["name"],
            user_email = self.cleaned_data["email"],
            user_url = self.cleaned_data["url"],
            comment = self.cleaned_data["comment"],
            submit_date = timezone.now(),
            site_id = settings.SITE_ID,
            is_public = True,
            is_removed = False,
        )
    def check_for_duplicate_comment(self, new):
        """
        Check that a submitted comment isn't a duplicate. This might be caused
        by someone posting a comment twice. If it is a dup, silently return the *previous* comment.
        """
        # Query the same database the target object lives on.
        possible_duplicates = self.get_comment_model()._default_manager.using(
            self.target_object._state.db
        ).filter(
            content_type = new.content_type,
            object_pk = new.object_pk,
            user_name = new.user_name,
            user_email = new.user_email,
            user_url = new.user_url,
        )
        # A duplicate is the same author posting identical text on the
        # same calendar day.
        for old in possible_duplicates:
            if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment:
                return old
        return new
    def clean_comment(self):
        """
        If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't
        contain anything in PROFANITIES_LIST.
        """
        comment = self.cleaned_data["comment"]
        if settings.COMMENTS_ALLOW_PROFANITIES == False:
            bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]
            if bad_words:
                # The error message censors each word to first/last letter
                # with dashes in between (e.g. "d--n").
                raise forms.ValidationError(ungettext(
                    "Watch your mouth! The word %s is not allowed here.",
                    "Watch your mouth! The words %s are not allowed here.",
                    len(bad_words)) % get_text_list(
                        ['"%s%s%s"' % (i[0], '-'*(len(i)-2), i[-1])
                         for i in bad_words], ugettext('and')))
        return comment
class CommentForm(CommentDetailsForm):
    """Comment form with a honeypot field used to catch naive spam bots."""
    honeypot = forms.CharField(
        required=False,
        label=_('If you enter anything in this field '
                'your comment will be treated as spam'))
    def clean_honeypot(self):
        """Reject the submission when the honeypot field was filled in."""
        honeypot_value = self.cleaned_data["honeypot"]
        if honeypot_value:
            raise forms.ValidationError(self.fields["honeypot"].label)
        return honeypot_value
|
akshayaurora/kivy | refs/heads/master | kivy/core/image/img_pil.py | 5 | '''
PIL: PIL image loader
'''
__all__ = ('ImageLoaderPIL', )
try:
import Image as PILImage
except ImportError:
# for python3
from PIL import Image as PILImage
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
# Feature-probe for the Pillow API: attribute access raises AttributeError
# on classic PIL, which only exposes the older method names.
try:
    # Pillow
    PILImage.frombytes
    PILImage.Image.tobytes
except AttributeError:
    # PIL
    # monkey patch frombytes and tobytes methods, refs:
    # https://github.com/kivy/kivy/issues/5460
    PILImage.frombytes = PILImage.frombuffer
    PILImage.Image.tobytes = PILImage.Image.tostring
class ImageLoaderPIL(ImageLoaderBase):
    '''Image loader based on the PIL library.

    .. versionadded:: 1.0.8

    Support for GIF animation added.

    Gif animation has a lot of issues (transparency/color depths... etc).
    In order to keep it simple, what is implemented here is what is
    natively supported by the PIL library.

    As a general rule, try to use gifs that have no transparency.
    Gif's with transparency will work but be prepared for some
    artifacts until transparency support is improved.
    '''
    @staticmethod
    def can_save(fmt, is_bytesio):
        '''Return True if this loader can save `fmt` files.

        Saving into an in-memory BytesIO object is not supported.
        '''
        if is_bytesio:
            return False
        return fmt in ImageLoaderPIL.extensions()
    @staticmethod
    def can_load_memory():
        '''PIL can open images from in-memory data.'''
        return True
    @staticmethod
    def extensions():
        '''Return accepted extensions for this loader'''
        PILImage.init()
        # PILImage.EXTENSION maps '.ext' -> format name; strip the dot.
        return tuple(ext_with_dot[1:] for ext_with_dot in PILImage.EXTENSION)
    def _img_correct(self, _img_tmp):
        '''Convert image to the correct format and orientation.
        '''
        # image loader work only with rgb/rgba image
        if _img_tmp.mode.lower() not in ('rgb', 'rgba'):
            try:
                imc = _img_tmp.convert('RGBA')
            # Narrowed from a bare ``except:`` so that system-exiting
            # exceptions (KeyboardInterrupt, SystemExit) pass through
            # without being logged as an image failure.
            except Exception:
                Logger.warning(
                    'Image: Unable to convert image to rgba (was %s)' %
                    (_img_tmp.mode.lower()))
                raise
            _img_tmp = imc
        return _img_tmp
    def _img_read(self, im):
        '''Read images from an animated file.

        Yields one ImageData per frame; a still image yields exactly one.
        '''
        im.seek(0)
        # Read all images inside
        try:
            img_ol = None
            while True:
                img_tmp = im
                img_tmp = self._img_correct(img_tmp)
                if img_ol and (hasattr(im, 'dispose') and not im.dispose):
                    # paste new frame over old so as to handle
                    # transparency properly
                    img_ol.paste(img_tmp, (0, 0), img_tmp)
                    img_tmp = img_ol
                img_ol = img_tmp
                yield ImageData(img_tmp.size[0], img_tmp.size[1],
                                img_tmp.mode.lower(), img_tmp.tobytes())
                im.seek(im.tell() + 1)
        except EOFError:
            # PIL signals "no more frames" by raising EOFError from seek().
            pass
    def load(self, filename):
        '''Load (and decode) `filename`, returning a list of ImageData
        frames (length 1 for a non-animated image).
        '''
        try:
            im = PILImage.open(filename)
        # Narrowed from a bare ``except:``; failure is logged and re-raised
        # exactly as before.
        except Exception:
            Logger.warning('Image: Unable to load image <%s>' % filename)
            raise
        # update internals
        if not self._inline:
            self.filename = filename
        # returns an array of type ImageData len 1 if not a sequence image
        return list(self._img_read(im))
    @staticmethod
    def save(filename, width, height, pixelfmt, pixels, flipped=False,
             imagefmt=None):
        '''Encode the raw `pixels` buffer into `filename`, optionally
        flipping vertically first.
        '''
        image = PILImage.frombytes(pixelfmt.upper(), (width, height), pixels)
        if flipped:
            image = image.transpose(PILImage.FLIP_TOP_BOTTOM)
        image.save(filename)
        return True
# register
# Make this loader discoverable by the image core's loader factory.
ImageLoader.register(ImageLoaderPIL)
|
ClearCorp-dev/odoo-costa-rica | refs/heads/8.0 | TODO-7.0/l10n_cr_account_financial_statements/__init__.py | 2 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Brainiq7/Ananse | refs/heads/master | ananse_dl/extractor/spike.py | 40 | from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
class SpikeIE(MTVServicesInfoExtractor):
    """Extractor for spike.com video clips and episodes.

    Accepts both desktop (www.spike.com/video-clips/..., /episodes/...)
    and mobile (m.spike.com/videos/video.rbml?id=...) URLs; mobile URLs
    are rewritten to their desktop form before extraction.
    """
    _VALID_URL = r'''(?x)https?://
        (www\.spike\.com/(video-clips|episodes)/.+|
         m\.spike\.com/videos/video.rbml\?id=(?P<mobile_id>[^&]+))
        '''
    _TEST = {
        'url': 'http://www.spike.com/video-clips/lhtu8m/auction-hunters-can-allen-ride-a-hundred-year-old-motorcycle',
        'md5': '1a9265f32b0c375793d6c4ce45255256',
        'info_dict': {
            'id': 'b9c8221a-4e50-479a-b86d-3333323e38ba',
            'ext': 'mp4',
            'title': 'Auction Hunters|Can Allen Ride A Hundred Year-Old Motorcycle?',
            'description': 'md5:fbed7e82ed5fad493615b3094a9499cb',
        },
    }
    _FEED_URL = 'http://www.spike.com/feeds/mrss/'
    _MOBILE_TEMPLATE = 'http://m.spike.com/videos/video.rbml?id=%s'
    def _real_extract(self, url):
        mobj = re.search(self._VALID_URL, url)
        # 'mobile_id' only matches for m.spike.com URLs; in that case,
        # normalize to the desktop URL before delegating to the shared
        # MTV services extractor.
        mobile_id = mobj.group('mobile_id')
        if mobile_id is not None:
            url = 'http://www.spike.com/video-clips/%s' % mobile_id
        return super(SpikeIE, self)._real_extract(url)
|
shazow/workerpool | refs/heads/master | samples/blockingworker.py | 2 | from workerpool import WorkerPool
"""
WARNING: This sample class is obsolete since version 0.9.2. It will be removed
or replaced soon.
"""
class BlockingWorkerPool(WorkerPool):
    """
    Similar to WorkerPool but a result queue is passed in along with each job
    and the method will block until the queue is filled with one entry per job.

    Bulk job lists can be performed using the `contract` method.
    """
    def put(self, job, result):
        "Perform a job by a member in the pool and return the result."
        # NOTE(review): ``self.job`` is not defined in this class and
        # presumably refers to the parent pool's internal job queue --
        # verify the attribute name against the WorkerPool implementation
        # (the module docstring already marks this sample as obsolete).
        self.job.put(job)
        # Block until the worker pushes its result onto the caller's queue.
        r = result.get()
        return r
    def contract(self, jobs, result):
        """
        Perform a contract on a number of jobs and block until a result is
        retrieved for each job.
        """
        # Enqueue via the parent class to bypass this class's blocking put().
        for j in jobs:
            WorkerPool.put(self, j)
        r = []
        # One result per submitted job; blocks until all have arrived.
        # (``xrange``: this module targets Python 2.)
        for i in xrange(len(jobs)):
            r.append(result.get())
        return r
|
rizar/attention-lvcsr | refs/heads/master | libs/Theano/theano/gof/graph.py | 1 | """
Node classes (`Apply`, `Variable`) and expression graph algorithms.
To read about what theano graphs are from a user perspective, have a look at
`graph.html <../doc/graph.html>`__.
"""
from __future__ import print_function
from collections import deque
from copy import copy
from itertools import count
import theano
from theano import config
from theano.gof import utils
from six import string_types, integer_types, iteritems
from theano.misc.ordered_set import OrderedSet
__docformat__ = "restructuredtext en"
# Lazy imports to avoid circular dependencies.
is_same_graph_with_merge = None
equal_computations = None
NoParams = object()
class Node(utils.object2):
    """
    A Node in a theano graph.

    Graphs contain two kinds of Nodes -- Variable and Apply.
    Edges in the graph are not explicitly represented.
    Instead each Node keeps track of its parents via
    Variable.owner / Apply.inputs and its children
    via Variable.clients / Apply.outputs.
    """
    def get_parents(self):
        """
        Return a list of the parents of this node.
        Should return a copy--i.e., modifying the return
        value should not modify the graph structure.
        """
        # Abstract: implemented by the Apply and Variable subclasses below.
        raise NotImplementedError()
class Apply(Node):
    """
    An :term:`Apply` instance is a node in an expression graph which represents
    the application of an `Op` to some input `Variable` nodes, producing some
    output `Variable` nodes.

    This class is typically instantiated by an Op's make_node() function, which
    is typically called by that Op's __call__() function.

    An Apply instance serves as a simple structure with three important
    attributes:

    - :literal:`inputs` : a list of `Variable` nodes that represent the
      arguments of the expression,
    - :literal:`outputs` : a list of `Variable` nodes that represent the
      variable of the expression, and
    - :literal:`op` : an `Op` instance that determines the nature of the
      expression being applied.

    The driver `compile.function` uses Apply's inputs attribute together with
    Variable's owner attribute to search the expression graph and determine
    which inputs are necessary to compute the function's outputs.

    A `Linker` uses the Apply instance's `op` field to compute the variables.

    Comparing with the Python language, an `Apply` instance is theano's version
    of a function call (or expression instance) whereas `Op` is theano's version
    of a function definition.

    Parameters
    ----------
    op : `Op` instance
    inputs : list of Variable instances
    outputs : list of Variable instances

    Notes
    -----
    The owner field of each output in the outputs list will be set to self.

    If an output element has an owner that is neither None nor self, then a
    ValueError exception will be raised.
    """
    def __init__(self, op, inputs, outputs):
        self.op = op
        self.inputs = []
        self.tag = utils.scratchpad()
        if not isinstance(inputs, (list, tuple)):
            raise TypeError("The inputs of an Apply must be a list or tuple")
        if not isinstance(outputs, (list, tuple)):
            raise TypeError("The output of an Apply must be a list or tuple")
        # filter inputs to make sure each element is a Variable
        for input in inputs:
            if isinstance(input, Variable):
                self.inputs.append(input)
            else:
                raise TypeError("The 'inputs' argument to Apply must contain Variable instances, not %s" % input)
        self.outputs = []
        # filter outputs to make sure each element is a Variable
        for i, output in enumerate(outputs):
            if isinstance(output, Variable):
                if output.owner is None:
                    # Claim ownership: this node is the producer of output.
                    output.owner = self
                    output.index = i
                elif output.owner is not self or output.index != i:
                    raise ValueError("All output variables passed to Apply must belong to it.")
                self.outputs.append(output)
            else:
                raise TypeError("The 'outputs' argument to Apply must contain Variable instances with no owner, not %s" % output)
    def run_params(self):
        """
        Returns the params for the node, or NoParams if no params is set.
        """
        if hasattr(self.op, 'get_params'):
            return self.op.get_params(self)
        return NoParams
    def __getstate__(self):
        d = self.__dict__
        # ufunc don't pickle/unpickle well
        if hasattr(self.tag, 'ufunc'):
            # Copy so that removing the ufunc does not mutate the live tag.
            d = copy(self.__dict__)
            t = d["tag"]
            del t.ufunc
            d["tag"] = t
        return d
    def default_output(self):
        """
        Returns the default output for this node.

        Returns
        -------
        Variable instance
            An element of self.outputs, typically self.outputs[0].

        Notes
        -----
        May raise AttributeError self.op.default_output is out of range, or if
        there are multiple outputs and self.op.default_output does not exist.
        """
        do = getattr(self.op, 'default_output', None)
        if do is None:
            # With no explicit choice, only a single-output node has an
            # unambiguous default.
            if len(self.outputs) == 1:
                return self.outputs[0]
            else:
                raise AttributeError(
                    "%s.default_output should be an output index." % self.op)
        elif not isinstance(do, integer_types):
            raise AttributeError("%s.default_output should be an int or long" %
                                 self.op)
        elif do < 0 or do >= len(self.outputs):
            raise AttributeError("%s.default_output is out of range." %
                                 self.op)
        return self.outputs[do]
    out = property(default_output,
                   doc="alias for self.default_output()")
    """
    Alias for self.default_output().
    """
    def __str__(self):
        return op_as_string(self.inputs, self)
    def __repr__(self):
        return str(self)
    def __asapply__(self):
        return self
    def clone(self):
        """
        Duplicate this Apply instance with inputs = self.inputs.

        Returns
        -------
        object
            A new Apply instance (or subclass instance) with new outputs.

        Notes
        -----
        Tags are copied from self to the returned instance.
        """
        # Inputs are shared; only the outputs are fresh Variables.
        cp = self.__class__(self.op, self.inputs,
                            [output.clone() for output in self.outputs])
        cp.tag = copy(self.tag)
        return cp
    def clone_with_new_inputs(self, inputs, strict=True):
        """
        Duplicate this Apply instance in a new graph.

        Parameters
        ----------
        inputs
            List of Variable instances to use as inputs.
        strict : bool
            If True, the type fields of all the inputs must be equal
            to the current ones (or compatible, for instance Tensor /
            CudaNdarray of the same dtype and broadcastable patterns,
            in which case they will be converted into current Type), and
            returned outputs are guaranteed to have the same types as
            self.outputs. If False, then there's no guarantee that the
            clone's outputs will have the same types as self.outputs,
            and cloning may not even be possible (it depends on the Op).

        Returns
        -------
        object
            An Apply instance with the same op but different outputs.
        """
        assert isinstance(inputs, (list, tuple))
        remake_node = False
        new_inputs = inputs[:]
        for i, (curr, new) in enumerate(zip(self.inputs, new_inputs)):
            if not curr.type == new.type:
                if strict:
                    # If compatible, casts new into curr.type
                    new_inputs[i] = curr.type.filter_variable(new)
                else:
                    # A type changed in non-strict mode: delegate to the op
                    # to build a node with possibly different output types.
                    remake_node = True
        if remake_node:
            new_node = self.op.make_node(*new_inputs)
            new_node.tag = copy(self.tag).__update__(new_node.tag)
        else:
            new_node = self.clone()
            new_node.inputs = new_inputs
        return new_node
    def get_parents(self):
        return list(self.inputs)
    # convenience properties
    nin = property(lambda self: len(self.inputs), doc='same as len(self.inputs)')
    """
    Property: Number of inputs.
    """
    nout = property(lambda self: len(self.outputs), doc='same as len(self.outputs)')
    """
    Property: Number of outputs.
    """
    params_type = property(lambda self: self.op.params_type, doc='type to use for the params')
class Variable(Node):
    """
    A :term:`Variable` is a node in an expression graph that represents a
    variable.

    The inputs and outputs of every `Apply` (theano.gof.Apply) are `Variable`
    instances. The input and output arguments to create a `function` are also
    `Variable` instances. A `Variable` is like a strongly-typed variable in
    some other languages; each `Variable` contains a reference to a `Type`
    instance that defines the kind of value the `Variable` can take in a
    computation.

    A `Variable` is a container for four important attributes:

    - :literal:`type` a `Type` instance defining the kind of value this
      `Variable` can have,
    - :literal:`owner` either None (for graph roots) or the `Apply` instance
      of which `self` is an output,
    - :literal:`index` the integer such that :literal:`owner.outputs[index] is
      this_variable` (ignored if `owner` is None),
    - :literal:`name` a string to use in pretty-printing and debugging.

    There are a few kinds of Variables to be aware of: A Variable which is the
    output of a symbolic computation has a reference to the Apply instance to
    which it belongs (property: owner) and the position of itself in the owner's
    output list (property: index).

    - `Variable` (this base type) is typically the output of a symbolic
      computation.
    - `Constant` (a subclass) which adds a default and un-replaceable
      :literal:`value`, and requires that owner is None.
    - `TensorVariable` subclass of Variable that represents a numpy.ndarray
      object.
    - `TensorSharedVariable` Shared version of TensorVariable.
    - `SparseVariable` subclass of Variable that represents
      a scipy.sparse.{csc,csr}_matrix object.
    - `CudaNdarrayVariable` subclass of Variable that represents our object on
      the GPU that is a subset of numpy.ndarray.
    - `RandomVariable`.

    A Variable which is the output of a symbolic computation will have an owner
    not equal to None.

    Using the Variables' owner field and the Apply nodes' inputs fields, one can
    navigate a graph from an output all the way to the inputs. The opposite
    direction is not possible until a FunctionGraph has annotated the Variables
    with the clients field, ie, before the compilation process has begun a
    Variable does not know which Apply nodes take it as input.

    Parameters
    ----------
    type : a Type instance
        The type governs the kind of data that can be associated with this
        variable.
    owner : None or Apply instance
        The Apply instance which computes the value for this variable.
    index : None or int
        The position of this Variable in owner.outputs.
    name : None or str
        A string for pretty-printing and debugging.

    Examples
    --------

    .. code-block:: python

        import theano
        from theano import tensor

        a = tensor.constant(1.5) # declare a symbolic constant
        b = tensor.fscalar()     # declare a symbolic floating-point scalar

        c = a + b                # create a simple expression

        f = theano.function([b], [c]) # this works because a has a value associated with it already

        assert 4.0 == f(2.5)          # bind 2.5 to an internal copy of b and evaluate an internal c

        theano.function([a], [c])     # compilation error because b (required by c) is undefined

        theano.function([a,b], [c])   # compilation error because a is constant, it can't be an input

        d = tensor.value(1.5)         # create a value similar to the constant 'a'
        e = d + b
        theano.function([d,b], [e])   # this works. d's default value of 1.5 is ignored.

    The python variables :literal:`a,b,c` all refer to instances of type
    `Variable`. The `Variable` refered to by `a` is also an instance of
    `Constant`.

    `compile.function` uses each `Apply` instance's `inputs` attribute together
    with each Variable's `owner` field to determine which inputs are necessary
    to compute the function's outputs.
    """
    # __slots__ = ['type', 'owner', 'index', 'name']
    # Class-level counter shared by all Variables; used to build a unique
    # auto_name per instance.
    __count__ = count(0)
    def __init__(self, type, owner=None, index=None, name=None):
        super(Variable, self).__init__()
        self.tag = utils.scratchpad()
        self.type = type
        if owner is not None and not isinstance(owner, Apply):
            raise TypeError("owner must be an Apply instance", owner)
        self.owner = owner
        if index is not None and not isinstance(index, int):
            raise TypeError("index must be an int", index)
        self.index = index
        if name is not None and not isinstance(name, string_types):
            raise TypeError("name must be a string", name)
        self.name = name
        self.auto_name = 'auto_' + str(next(self.__count__))
    def __str__(self):
        """Return a str representation of the Variable.
        """
        if self.name is not None:
            return self.name
        if self.owner is not None:
            op = self.owner.op
            if self.index == op.default_output:
                return str(self.owner.op) + ".out"
            else:
                return str(self.owner.op) + "." + str(self.index)
        else:
            return "<%s>" % str(self.type)
    def __repr_test_value__(self):
        """Return a repr of the test value.

        Return a printable representation of the test value. It can be
        overridden by classes with non printable test_value to provide a
        suitable representation of the test_value.
        """
        return repr(theano.gof.op.get_test_value(self))
    def __repr__(self, firstPass=True):
        """Return a repr of the Variable.

        Return a printable name or description of the Variable. If
        config.print_test_value is True it will also print the test_value if
        any.
        """
        to_print = [str(self)]
        if config.print_test_value and firstPass:
            try:
                to_print.append(self.__repr_test_value__())
            except AttributeError:
                # No test value attached to this Variable; print name only.
                pass
        return '\n'.join(to_print)
    def clone(self):
        """
        Return a new Variable like self.

        Returns
        -------
        Variable instance
            A new Variable instance (or subclass instance) with no owner or
            index.

        Notes
        -----
        Tags are copied to the returned instance.

        Name is copied to the returned instance.
        """
        # return copy(self)
        cp = self.__class__(self.type, None, None, self.name)
        cp.tag = copy(self.tag)
        return cp
    def __lt__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __lt__',
                                  self.__class__.__name__)
    def __le__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __le__',
                                  self.__class__.__name__)
    def __gt__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __gt__',
                                  self.__class__.__name__)
    def __ge__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __ge__',
                                  self.__class__.__name__)
    def get_parents(self):
        # The only parent of a Variable is the Apply node that produced it.
        if self.owner is not None:
            return [self.owner]
        return []
    def eval(self, inputs_to_values=None):
        """
        Evaluates this variable.

        Parameters
        ----------
        inputs_to_values
            A dictionary mapping theano Variables to values.

        Examples
        --------

        >>> import numpy
        >>> import theano.tensor as T
        >>> x = T.dscalar('x')
        >>> y = T.dscalar('y')
        >>> z = x + y
        >>> numpy.allclose(z.eval({x : 16.3, y : 12.1}), 28.4)
        True

        We passed :func:`eval` a dictionary mapping symbolic theano
        variables to the values to substitute for them, and it returned
        the numerical value of the expression.

        Notes
        -----

        `eval` will be slow the first time you call it on a variable --
        it needs to call :func:`function` to compile the expression behind
        the scenes. Subsequent calls to :func:`eval` on that same variable
        will be fast, because the variable caches the compiled function.

        This way of computing has more overhead than a normal Theano
        function, so don't use it too much in real scripts.
        """
        if inputs_to_values is None:
            inputs_to_values = {}
        if not hasattr(self, '_fn_cache'):
            self._fn_cache = dict()
        # Sort by id to get a canonical cache key independent of dict order.
        inputs = tuple(sorted(inputs_to_values.keys(), key=id))
        if inputs not in self._fn_cache:
            self._fn_cache[inputs] = theano.function(inputs, self)
        args = [inputs_to_values[param] for param in inputs]
        rval = self._fn_cache[inputs](*args)
        return rval
    def __getstate__(self):
        d = self.__dict__.copy()
        # Compiled functions are not picklable; drop the eval() cache.
        d.pop("_fn_cache", None)
        return d
class Constant(Variable):
    """
    A :term:`Constant` is a `Variable` with a `value` field that cannot be
    changed at runtime.

    Constant nodes make eligible numerous optimizations: constant inlining in
    C code, constant folding, etc.

    Notes
    -----
    The data field is filtered by what is provided in the constructor for the
    Constant's type field.

    WRITEME
    """
    # __slots__ = ['data']
    def __init__(self, type, data, name=None):
        Variable.__init__(self, type, None, None, name)
        # Validate/coerce the raw data through the type before storing it.
        self.data = type.filter(data)
    def equals(self, other):
        # this does what __eq__ should do, but Variable and Apply should always be hashable by id
        return isinstance(other, Constant) and self.signature() == other.signature()
    def signature(self):
        # (type, data) uniquely identifies the constant for merging purposes.
        return (self.type, self.data)
    def merge_signature(self):
        return self.signature()
    def __str__(self):
        if self.name is not None:
            return self.name
        else:
            name = str(self.data)
            # Abbreviate long values to keep graph printouts readable.
            if len(name) > 20:
                name = name[:10] + '...' + name[-10:]
            return 'Constant{%s}' % name
    def clone(self):
        """
        We clone this object, but we don't clone the data to lower memory
        requirement. We suppose that the data will never change.
        """
        cp = self.__class__(self.type, self.data, self.name)
        cp.tag = copy(self.tag)
        return cp
    def __set_owner(self, value):
        """
        WRITEME

        Raises
        ------
        ValueError
            If `value` is not `None`.
        """
        if value is not None:
            raise ValueError("Constant instances cannot have an owner.")
    owner = property(lambda self: None, __set_owner)
    value = property(lambda self: self.data, doc='read-only data access method')
    # index is not defined, because the `owner` attribute must necessarily be None
def stack_search(start, expand, mode='bfs', build_inv=False):
    """
    Search through a graph, either breadth- or depth-first.

    Parameters
    ----------
    start : deque
        Search from these nodes.
    expand : callable
        When we get to a node, add expand(node) to the list of nodes to visit.
        This function should return a list, or None.
    mode : str
        Either 'bfs' (FIFO frontier) or 'dfs' (LIFO frontier).
    build_inv : bool
        If True, also return a dict mapping each expanded node to the list
        of nodes it was reached from.

    Returns
    -------
    list of `Variable` or `Apply` instances (depends on `expand`)
        The list of nodes in order of traversal.

    Notes
    -----
    A node will appear at most once in the return value, even if it
    appears multiple times in the start parameter.

    :postcondition: every element of start is transferred to the returned list.
    :postcondition: start is empty.
    """
    if mode not in ('bfs', 'dfs'):
        raise ValueError('mode should be bfs or dfs', mode)
    # Visited bookkeeping is keyed on id() so unhashable nodes work too.
    seen_ids = set()
    ordered = []
    pop_next = start.popleft if mode == 'bfs' else start.pop
    inverse = {}
    while start:
        node = pop_next()
        if id(node) in seen_ids:
            continue
        ordered.append(node)
        seen_ids.add(id(node))
        successors = expand(node)
        if successors:
            if build_inv:
                for succ in successors:
                    inverse.setdefault(succ, []).append(node)
            start.extend(successors)
    assert len(ordered) == len(seen_ids)
    if build_inv:
        return ordered, inverse
    return ordered
def ancestors(variable_list, blockers=None):
    """
    Return the variables that contribute to those in variable_list (inclusive).

    Parameters
    ----------
    variable_list : list of `Variable` instances
        Output `Variable` instances from which to search backward through
        owners.
    blockers : optional collection of `Variable` instances
        Variables past which the search does not expand.

    Returns
    -------
    list of `Variable` instances
        All input nodes, in the order found by a left-recursive depth-first
        search started at the nodes in `variable_list`.
    """
    def expand(var):
        # Step into the producing Apply node unless `var` is a blocker;
        # inputs are reversed so the DFS visits them left-to-right.
        if var.owner and (not blockers or var not in blockers):
            return reversed(var.owner.inputs)
    return stack_search(deque(variable_list), expand, 'dfs')
def inputs(variable_list, blockers=None):
    """
    Return the inputs required to compute the given Variables.

    Parameters
    ----------
    variable_list : list of `Variable` instances
        Output `Variable` instances from which to search backward through
        owners.
    blockers : optional collection of `Variable` instances
        Variables past which the search does not expand.

    Returns
    -------
    list of `Variable` instances
        Input nodes with no owner, in the order found by a left-recursive
        depth-first search started at the nodes in `variable_list`.
    """
    # Ownerless ancestors are, by definition, the graph's inputs.
    return [var for var in ancestors(variable_list, blockers)
            if var.owner is None]
def variables_and_orphans(i, o):
    """
    Return all variables between inputs `i` and outputs `o`, plus orphans.

    Performs a depth-first search backward from `o`, stopping at the
    variables in `i`.

    Returns
    -------
    (variables, orphans) : tuple of two lists
        `variables` is every variable reached by the search (including
        both inputs and outputs of each visited Apply node); `orphans`
        are the reached variables that have no owner and are not in `i`.
    """
    def expand(r):
        if r.owner and r not in i:
            l = list(r.owner.inputs) + list(r.owner.outputs)
            l.reverse()
            return l
    variables = stack_search(deque(o), expand, 'dfs')
    orphans = [r for r in variables if r.owner is None and r not in i]
    return variables, orphans
def ops(i, o):
    """
    Return the set of ops in the subgraph between `i` and `o`.

    Parameters
    ----------
    i : list
        Input L{Variable}s.
    o : list
        Output L{Variable}s.

    Returns
    -------
    set
        The set of ops that are contained within the subgraph that lies
        between i and o, including the owners of the L{Variable}s in o and
        intermediary ops between i and o, but not the owners of the
        L{Variable}s in i.
    """
    subgraph_vars, orphan_vars = variables_and_orphans(i, o)
    # Keep the owner of every variable produced inside the subgraph;
    # inputs and orphans have no producing op inside it.
    return set(var.owner for var in subgraph_vars
               if var not in i and var not in orphan_vars
               and var.owner is not None)
def variables(i, o):
    """
    Return the variables involved in the subgraph between `i` and `o`.

    Parameters
    ----------
    i : list
        Input L{Variable}s.
    o : list
        Output L{Variable}s.

    Returns
    -------
    object
        The set of Variables that are involved in the subgraph that lies
        between i and o. This includes i, o, orphans(i, o) and all values of
        all intermediary steps from i to o.
    """
    return variables_and_orphans(i, o)[0]
def orphans(i, o):
    """
    Return the external variables that the outputs depend on.

    Parameters
    ----------
    i : list
        Input L{Variable}s.
    o : list
        Output L{Variable}s.

    Returns
    -------
    object
        The set of Variables which one or more Variables in o depend on but are
        neither in i nor in the subgraph that lies between i and o.

    Examples
    --------
    orphans([x], [(x+y).out]) => [y]
    """
    return variables_and_orphans(i, o)[1]
def clone(i, o, copy_inputs=True):
    """
    Copy the subgraph contained between `i` and `o`.

    Parameters
    ----------
    i : list
        Input L{Variable}s.
    o : list
        Output L{Variable}s.
    copy_inputs : bool
        If True, the inputs will be copied (defaults to True).

    Returns
    -------
    tuple of two lists
        The cloned inputs and the cloned outputs, in the same order as
        `i` and `o`.
    """
    mapping = clone_get_equiv(i, o, copy_inputs)
    cloned_inputs = [mapping[inp] for inp in i]
    cloned_outputs = [mapping[out] for out in o]
    return cloned_inputs, cloned_outputs
def clone_get_equiv(inputs, outputs, copy_inputs_and_orphans=True, memo=None):
    """
    Return a dictionary that maps from Variable and Apply nodes in the
    original graph to a new node (a clone) in a new graph.

    This function works by recursively cloning inputs... rebuilding a directed
    graph from the bottom (inputs) up to eventually building new outputs.

    Parameters
    ----------
    inputs : a list of Variables
    outputs : a list of Variables
    copy_inputs_and_orphans : bool
        True means to create the cloned graph from new input and constant
        nodes (the bottom of a feed-upward graph).
        False means to clone a graph that is rooted at the original input
        nodes.
    memo : None or dict
        Optionally start with a partly-filled dictionary for the return value.
        If a dictionary is passed, this function will work in-place on that
        dictionary and return it.

    Returns
    -------
    dict
        The (possibly pre-seeded) mapping from original nodes to clones.
    """
    if memo is None:
        memo = {}
    # clone the inputs if necessary
    for input in inputs:
        if copy_inputs_and_orphans:
            cpy = input.clone()
            # A cloned input becomes a graph root: no producing Apply node.
            cpy.owner = None
            cpy.index = None
            memo.setdefault(input, cpy)
        else:
            memo.setdefault(input, input)
    # go through the inputs -> outputs graph cloning as we go
    for apply in io_toposort(inputs, outputs):
        for input in apply.inputs:
            if input not in memo:
                # Variables not reachable from `inputs` (orphans) are cloned
                # or reused according to `copy_inputs_and_orphans`.
                if copy_inputs_and_orphans:
                    cpy = input.clone()
                    memo[input] = cpy
                else:
                    memo[input] = input
        new_apply = apply.clone_with_new_inputs([memo[i] for i in apply.inputs])
        memo.setdefault(apply, new_apply)
        for output, new_output in zip(apply.outputs, new_apply.outputs):
            memo.setdefault(output, new_output)
    # finish up by cloning any remaining outputs (it can happen)
    for output in outputs:
        if output not in memo:
            memo[output] = output.clone()
    return memo
def general_toposort(r_out, deps, debug_print=False,
                     compute_deps_cache=None, deps_cache=None,
                     clients=None):
    """
    Topologically sort all nodes reachable from the given outputs.

    Parameters
    ----------
    r_out : tuple, list or deque
        The output nodes from which the search starts.
    deps
        A python function that takes a node as input and returns its dependence.
    compute_deps_cache : optional
        If provided deps_cache should also be provided. This is a function like
        deps, but that also cache its results in a dict passed as deps_cache.
    deps_cache : dict
        Must be used with compute_deps_cache.
    clients : dict
        If a dict is passed it will be filled with a mapping of node
        -> clients for each node in the subgraph.

    Returns
    -------
    list
        Every reachable node, dependencies before dependents.

    Raises
    ------
    ValueError
        If the dependency graph contains a cycle.

    Notes
    -----
    deps(i) should behave like a pure function (no funny business with
    internal state).
    deps(i) will be cached by this function (to be fast).
    The order of the return value list is determined by the order of nodes
    returned by the deps() function.
    deps should be provided or can be None and the caller provides
    compute_deps_cache and deps_cache. The second option removes a Python
    function call, and allows for more specialized code, so it can be
    faster.
    """
    if compute_deps_cache is None:
        deps_cache = {}

        # Default caching wrapper around `deps`; results must be
        # deterministic collections so the sort order is reproducible.
        def compute_deps_cache(io):
            if io not in deps_cache:
                d = deps(io)
                if d:
                    if not isinstance(d, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    deps_cache[io] = list(d)
                else:
                    deps_cache[io] = d
                return d
            else:
                return deps_cache[io]
    assert deps_cache is not None
    assert isinstance(r_out, (tuple, list, deque))
    # Discover every node reachable through dependencies, recording the
    # inverse edges (node -> clients) along the way.
    reachable, _clients = stack_search(deque(r_out), compute_deps_cache,
                                       'dfs', True)
    if clients is not None:
        clients.update(_clients)
    # Dependency-peeling sort: start from nodes with no dependencies, emit
    # them, and release each client whose dependency list becomes empty.
    sources = deque([r for r in reachable if not deps_cache.get(r, None)])
    rset = set()
    rlist = []
    while sources:
        node = sources.popleft()
        if node not in rset:
            rlist.append(node)
            rset.add(node)
            for client in _clients.get(node, []):
                deps_cache[client] = [a for a in deps_cache[client]
                                      if a is not node]
                if not deps_cache[client]:
                    sources.append(client)
    # Any node left unemitted means a dependency cycle.
    if len(rlist) != len(reachable):
        if debug_print:
            print('')
            print(reachable)
            print(rlist)
        raise ValueError('graph contains cycles')
    return rlist
def io_toposort(inputs, outputs, orderings=None, clients=None):
    """
    Topologically sort the Apply nodes between `inputs` and `outputs`.

    Parameters
    ----------
    inputs : list or tuple of Variable instances
    outputs : list or tuple of Apply instances
    orderings : dict
        Key: Apply instance. Value: list of Apply instance.
        It is important that the value be a container with a deterministic
        iteration order. No sets allowed!
    clients : dict
        If a dict is provided it will be filled with mappings of
        node->clients for each node in the subgraph that is sorted

    Returns
    -------
    list of Apply instances
        The Apply nodes of the subgraph, dependencies first.
    """
    # the inputs are used only here in the function that decides what
    # 'predecessors' to explore
    iset = set(inputs)
    # We build 2 functions as a speed up
    deps_cache = {}
    compute_deps = None
    compute_deps_cache = None
    if not orderings:  # can be None or empty dict
        # Specialized function that is faster when no ordering.
        # Also include the cache in the function itself for speed up.
        def compute_deps_cache(obj):
            if obj in deps_cache:
                return deps_cache[obj]
            rval = []
            if obj not in iset:
                # A Variable depends on its owner; an Apply on its inputs.
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                if rval:
                    if not isinstance(rval, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    deps_cache[obj] = list(rval)
                else:
                    deps_cache[obj] = rval
            else:
                deps_cache[obj] = rval
            return rval
    else:
        # General version: also honour the extra `orderings` constraints.
        def compute_deps(obj):
            rval = []
            if obj not in iset:
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                rval.extend(orderings.get(obj, []))
            else:
                assert not orderings.get(obj, [])
            return rval
    topo = general_toposort(outputs, deps=compute_deps,
                            compute_deps_cache=compute_deps_cache,
                            deps_cache=deps_cache, clients=clients)
    # Callers only care about the Apply nodes, not the Variables.
    return [o for o in topo if isinstance(o, Apply)]
# Leaves are rendered with plain str() unless a caller overrides this.
default_leaf_formatter = str


def default_node_formatter(op, argstrings):
    """Render an apply node as ``op(arg1, arg2, ...)``."""
    joined_args = ", ".join(argstrings)
    return "%s(%s)" % (op.op, joined_args)
def io_connection_pattern(inputs, outputs):
    """
    Returns the connection pattern of a subgraph defined by given
    inputs and outputs.

    Parameters
    ----------
    inputs : list of Variable instances
    outputs : list of Variable instances

    Returns
    -------
    list of list of bool
        One row per input; entry [i][j] is True when output j is
        connected to (depends on) input i.
    """
    inner_nodes = io_toposort(inputs, outputs)
    # Initialize 'connect_pattern_by_var' by establishing each input as
    # connected only to itself
    connect_pattern_by_var = {}
    nb_inputs = len(inputs)
    for i in range(nb_inputs):
        input = inputs[i]
        inp_connection_pattern = [i == j for j in range(nb_inputs)]
        connect_pattern_by_var[input] = inp_connection_pattern
    # Iterate through the nodes used to produce the outputs from the
    # inputs and, for every node, infer their connection pattern to
    # every input from the connection patterns of their parents.
    for n in inner_nodes:
        # Get the connection pattern of the inner node's op. If the op
        # does not define a connection_pattern method, assume that
        # every node output is connected to every node input
        try:
            op_connection_pattern = n.op.connection_pattern(n)
        except AttributeError:
            op_connection_pattern = ([[True] * len(n.outputs)] *
                                     len(n.inputs))
        # For every output of the inner node, figure out which inputs it
        # is connected to by combining the connection pattern of the inner
        # node and the connection patterns of the inner node's inputs.
        for out_idx in range(len(n.outputs)):
            out = n.outputs[out_idx]
            out_connection_pattern = [False] * nb_inputs
            for inp_idx in range(len(n.inputs)):
                inp = n.inputs[inp_idx]
                if inp in connect_pattern_by_var:
                    inp_connection_pattern = connect_pattern_by_var[inp]
                    # If the node output is connected to the node input, it
                    # means it is connected to every inner input that the
                    # node inputs is connected to
                    if op_connection_pattern[inp_idx][out_idx]:
                        out_connection_pattern = [out_connection_pattern[i] or
                                                  inp_connection_pattern[i]
                                                  for i in range(nb_inputs)]
            # Store the connection pattern of the node output
            connect_pattern_by_var[out] = out_connection_pattern
    # Obtain the global connection pattern by combining the
    # connnection patterns of the individual outputs
    global_connection_pattern = [[] for o in range(len(inputs))]
    for out in outputs:
        out_connection_pattern = connect_pattern_by_var[out]
        for i in range(len(inputs)):
            global_connection_pattern[i].append(out_connection_pattern[i])
    return global_connection_pattern
def is_same_graph(var1, var2, givens=None, debug=False):
    """
    Return True iff Variables `var1` and `var2` perform the same computation.

    By 'performing the same computation', we mean that they must share the same
    graph, so that for instance this function will return False when comparing
    (x * (y * z)) with ((x * y) * z).

    The current implementation is not efficient since, when possible, it
    verifies equality by calling two different functions that are expected to
    return the same output. The goal is to verify this assumption, to
    eventually get rid of one of them in the future.

    Parameters
    ----------
    var1
        The first Variable to compare.
    var2
        The second Variable to compare.
    givens
        Similar to the `givens` argument of `theano.function`, it can be used
        to perform substitutions in the computational graph of `var1` and
        `var2`. This argument is associated to neither `var1` nor `var2`:
        substitutions may affect both graphs if the substituted variable
        is present in both.
    debug : bool
        If True, then an exception is raised when we are in a situation where
        the `equal_computations` implementation cannot be called.
        This parameter is intended to be used in tests only, to make sure we
        properly test both implementations.

    Returns
    -------
    bool
        Whether the two variables compute the same thing.

    Examples
    --------
    ====== ====== ====== ======
    var1   var2   givens output
    ====== ====== ====== ======
    x + 1  x + 1  {}     True
    x + 1  y + 1  {}     False
    x + 1  y + 1  {x: y} True
    ====== ====== ====== ======
    """
    # Lazy import.
    if givens is None:
        givens = {}
    global equal_computations, is_same_graph_with_merge
    if equal_computations is None:
        from theano.gof.opt import is_same_graph_with_merge
        from theano.scan_module.scan_utils import equal_computations
    # Convert `givens` to dictionary.
    if not isinstance(givens, dict):
        givens = dict(givens)
    # Get result from the merge-based function.
    rval1 = is_same_graph_with_merge(var1=var1, var2=var2, givens=givens)
    # Get result from the function `equal_computations` from scan_utils.
    use_equal_computations = True
    if givens:
        # We need to build the `in_xs` and `in_ys` lists. To do this, we need
        # to be able to tell whether a variable belongs to the computational
        # graph of `var1` or `var2`.
        # The typical case we want to handle is when `to_replace` belongs to
        # one of these graphs, and `replace_by` belongs to the other one. In
        # other situations, the current implementation of `equal_computations`
        # is probably not appropriate, so we do not call it.
        ok = True
        in_xs = []
        in_ys = []
        # Compute the sets of all variables found in each computational graph.
        inputs_var = list(map(inputs, ([var1], [var2])))
        all_vars = [set(variables(v_i, v_o))
                    for v_i, v_o in ((inputs_var[0], [var1]),
                                     (inputs_var[1], [var2]))]

        def in_var(x, k):
            # Return True iff `x` is in computation graph of variable `vark`.
            return x in all_vars[k - 1]
        for to_replace, replace_by in iteritems(givens):
            # Map a substitution variable to the computational graphs it
            # belongs to.
            inside = dict((v, [in_var(v, k) for k in (1, 2)])
                          for v in (to_replace, replace_by))
            if (inside[to_replace][0] and not inside[to_replace][1] and
                    inside[replace_by][1] and not inside[replace_by][0]):
                # Substitute variable in `var1` by one from `var2`.
                in_xs.append(to_replace)
                in_ys.append(replace_by)
            elif (inside[to_replace][1] and not inside[to_replace][0] and
                    inside[replace_by][0] and not inside[replace_by][1]):
                # Substitute variable in `var2` by one from `var1`.
                in_xs.append(replace_by)
                in_ys.append(to_replace)
            else:
                ok = False
                break
        if not ok:
            # We cannot directly use `equal_computations`.
            if debug:
                raise AssertionError(
                    'When `debug` is True we want to make sure we are also '
                    'using the `equal_computations` implementation')
            use_equal_computations = False
    else:
        in_xs = None
        in_ys = None
    if use_equal_computations:
        rval2 = equal_computations(xs=[var1], ys=[var2],
                                   in_xs=in_xs, in_ys=in_ys)
        # Both implementations must agree; cross-checking them is the point
        # of running both.
        assert rval2 == rval1
    return rval1
def op_as_string(i, op,
                 leaf_formatter=default_leaf_formatter,
                 node_formatter=default_node_formatter):
    """
    Return a string rendering of `op` applied to its (rendered) inputs.

    `i` is the collection of boundary input variables passed through to
    `as_string`.
    """
    strs = as_string(i, op.inputs, leaf_formatter, node_formatter)
    return node_formatter(op, strs)
def as_string(i, o,
              leaf_formatter=default_leaf_formatter,
              node_formatter=default_node_formatter):
    """
    Return a human-readable string rendering of the subgraph between i and o.

    Parameters
    ----------
    i : list
        Input `Variable` s.
    o : list
        Output `Variable` s.
    leaf_formatter : function
        Takes a `Variable` and returns a string to describe it.
    node_formatter : function
        Takes an `Op` and the list of strings corresponding to its arguments
        and returns a string to describe it.

    Returns
    -------
    str
        Returns a string representation of the subgraph between i and o. If the
        same op is used by several other ops, the first occurrence will be
        marked as :literal:`*n -> description` and all subsequent occurrences
        will be marked as :literal:`*n`, where n is an id number (ids are
        attributed in an unspecified order and only exist for viewing
        convenience).
    """
    i = set(i)
    orph = orphans(i, o)
    # `multi` collects apply nodes whose result is consumed more than once;
    # those get a "*n" id so shared subtrees are printed only once.
    multi = set()
    seen = set()
    for output in o:
        op = output.owner
        if op in seen:
            multi.add(op)
        else:
            seen.add(op)
    for op in ops(i, o):
        for input in op.inputs:
            op2 = input.owner
            if input in i or input in orph or op2 is None:
                continue
            if op2 in seen:
                multi.add(op2)
            else:
                seen.add(input.owner)
    multi = [x for x in multi]
    done = set()

    def multi_index(x):
        # 1-based id of a shared apply node, used in the "*n" markers.
        return multi.index(x) + 1

    def describe(r):
        # Recursively render variable `r`; an already-printed shared node
        # is referenced as "*n" instead of being expanded again.
        if r.owner is not None and r not in i and r not in orph:
            op = r.owner
            idx = op.outputs.index(r)
            if len(op.outputs) == 1:
                idxs = ""
            else:
                # Multi-output apply: suffix with the output's index.
                idxs = "::%i" % idx
            if op in done:
                return "*%i%s" % (multi_index(op), idxs)
            else:
                done.add(op)
                s = node_formatter(op, [describe(input) for input in op.inputs])
                if op in multi:
                    return "*%i -> %s" % (multi_index(op), s)
                else:
                    return s
        else:
            return leaf_formatter(r)
    return [describe(output) for output in o]
def view_roots(r):
    """
    Return the leaves of a search through consecutive view_map()s.

    Starting from variable `r`, repeatedly follow the inputs its owner op
    declares as views of `r`; variables without such a link (or without an
    owner) are the roots.
    """
    node = r.owner
    if node is None:
        return [r]
    try:
        raw_map = node.op.view_map
        # Re-key the op's view_map by output variable instead of output index.
        by_output = dict((node.outputs[pos], srcs)
                         for pos, srcs in iteritems(raw_map))
    except AttributeError:
        # Op defines no view_map: r is its own root.
        return [r]
    if r not in by_output:
        return [r]
    roots = []
    for idx in by_output[r]:
        roots += view_roots(node.inputs[idx])
    return roots
def list_of_nodes(inputs, outputs):
    """
    Return the apply nodes of the graph between inputs and outputs.
    """
    def expand(node):
        # Visit the producers of this node's inputs, but do not walk past
        # apply nodes that produced one of the boundary `inputs`.
        return [inp.owner for inp in node.inputs
                if inp.owner and
                not any(i in inp.owner.outputs for i in inputs)]
    return stack_search(deque([o.owner for o in outputs]), expand)
|
jfmorcillo/mss | refs/heads/master | modules/davical/__init__.py | 3 | # -*- coding: UTF-8 -*-
#
# (c) 2012 Mandriva, http://www.mandriva.com/
#
# This file is part of Mandriva Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
def get_config_info():
    """Return the setup script for this module and its argument list."""
    script = 'setup-davical.sh'
    script_args = []
    return (script, script_args)
|
lmazuel/azure-sdk-for-python | refs/heads/master | azure-mgmt-web/azure/mgmt/web/models/deployment.py | 2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class Deployment(ProxyOnlyResource):
    """User credentials used for publishing activity.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param deployment_id: Identifier for deployment.
    :type deployment_id: str
    :param status: Deployment status.
    :type status: int
    :param message: Details about deployment status.
    :type message: str
    :param author: Who authored the deployment.
    :type author: str
    :param deployer: Who performed the deployment.
    :type deployer: str
    :param author_email: Author email.
    :type author_email: str
    :param start_time: Start time.
    :type start_time: datetime
    :param end_time: End time.
    :type end_time: datetime
    :param active: True if deployment is currently active, false if completed
     and null if not started.
    :type active: bool
    :param details: Details on deployment.
    :type details: str
    """

    # Server-populated fields that must not be sent in requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps Python attribute names to their wire-format JSON paths/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'deployment_id': {'key': 'properties.id', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'int'},
        'message': {'key': 'properties.message', 'type': 'str'},
        'author': {'key': 'properties.author', 'type': 'str'},
        'deployer': {'key': 'properties.deployer', 'type': 'str'},
        'author_email': {'key': 'properties.authorEmail', 'type': 'str'},
        'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
        'active': {'key': 'properties.active', 'type': 'bool'},
        'details': {'key': 'properties.details', 'type': 'str'},
    }

    def __init__(self, kind=None, deployment_id=None, status=None, message=None, author=None, deployer=None, author_email=None, start_time=None, end_time=None, active=None, details=None):
        super(Deployment, self).__init__(kind=kind)
        self.deployment_id = deployment_id
        self.status = status
        self.message = message
        self.author = author
        self.deployer = deployer
        self.author_email = author_email
        self.start_time = start_time
        self.end_time = end_time
        self.active = active
        self.details = details
|
gspilio/nova | refs/heads/master | nova/virt/xenapi/vm_utils.py | 2 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import contextlib
import decimal
import os
import re
import time
import urllib
import urlparse
import uuid
from xml.dom import minidom
from xml.parsers import expat
from eventlet import greenthread
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt.xenapi import agent
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
xenapi_vm_utils_opts = [
cfg.StrOpt('cache_images',
default='all',
help='Cache glance images locally. `all` will cache all'
' images, `some` will only cache images that have the'
' image_property `cache_in_nova=True`, and `none` turns'
' off caching entirely'),
cfg.StrOpt('default_os_type',
default='linux',
help='Default OS type'),
cfg.IntOpt('block_device_creation_timeout',
default=10,
help='Time to wait for a block device to be created'),
cfg.IntOpt('max_kernel_ramdisk_size',
default=16 * 1024 * 1024,
help='Maximum size in bytes of kernel or ramdisk images'),
cfg.StrOpt('sr_matching_filter',
default='other-config:i18n-key=local-storage',
help='Filter for finding the SR to be used to install guest '
'instances on. The default value is the Local Storage in '
'default XenServer/XCP installations. To select an SR '
'with a different matching criteria, you could set it to '
'other-config:my_favorite_sr=true. On the other hand, to '
'fall back on the Default SR, as displayed by XenCenter, '
'set this flag to: default-sr:true'),
cfg.BoolOpt('xenapi_sparse_copy',
default=True,
help='Whether to use sparse_copy for copying data on a '
'resize down (False will use standard dd). This speeds '
'up resizes down considerably since large runs of zeros '
'won\'t have to be rsynced'),
cfg.IntOpt('xenapi_num_vbd_unplug_retries',
default=10,
help='Maximum number of retries to unplug VBD'),
cfg.StrOpt('xenapi_torrent_images',
default='none',
help='Whether or not to download images via Bit Torrent '
'(all|some|none).'),
cfg.StrOpt('xenapi_torrent_base_url',
default=None,
help='Base URL for torrent files.'),
cfg.FloatOpt('xenapi_torrent_seed_chance',
default=1.0,
help='Probability that peer will become a seeder.'
' (1.0 = 100%)'),
cfg.IntOpt('xenapi_torrent_seed_duration',
default=3600,
help='Number of seconds after downloading an image via'
' BitTorrent that it should be seeded for other peers.'),
cfg.IntOpt('xenapi_torrent_max_last_accessed',
default=86400,
help='Cached torrent files not accessed within this number of'
' seconds can be reaped'),
cfg.IntOpt('xenapi_torrent_listen_port_start',
default=6881,
help='Beginning of port range to listen on'),
cfg.IntOpt('xenapi_torrent_listen_port_end',
default=6891,
help='End of port range to listen on'),
cfg.IntOpt('xenapi_torrent_download_stall_cutoff',
default=600,
help='Number of seconds a download can remain at the same'
' progress percentage w/o being considered a stall'),
cfg.IntOpt('xenapi_torrent_max_seeder_processes_per_host',
default=1,
help='Maximum number of seeder processes to run concurrently'
' within a given dom0. (-1 = no limit)')
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vm_utils_opts)
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('glance_num_retries', 'nova.image.glance')
CONF.import_opt('use_ipv6', 'nova.netconf')
# Map of XenAPI VM power-state names to nova power_state constants.
XENAPI_POWER_STATE = {
    'Halted': power_state.SHUTDOWN,
    'Running': power_state.RUNNING,
    'Paused': power_state.PAUSED,
    'Suspended': power_state.SUSPENDED,
    'Crashed': power_state.CRASHED}

# Disk geometry: a conventional MBR layout reserves the first 63 sectors.
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
# Dom0 directory where guest kernel/ramdisk images are staged.
KERNEL_DIR = '/boot/guest'
# Upper bound on VHD chain length tolerated by the driver.
MAX_VDI_CHAIN_SIZE = 16
class ImageType(object):
    """Enumeration class for distinguishing different image types

    | 0 - kernel image (goes on dom0's filesystem)
    | 1 - ramdisk image (goes on dom0's filesystem)
    | 2 - disk image (local SR, partitioned by objectstore plugin)
    | 3 - raw disk image (local SR, NOT partitioned by plugin)
    | 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
    |     linux, HVM assumed for Windows)
    | 5 - ISO disk image (local SR, NOT partitioned by plugin)
    | 6 - config drive
    """

    KERNEL = 0
    RAMDISK = 1
    DISK = 2
    DISK_RAW = 3
    DISK_VHD = 4
    DISK_ISO = 5
    DISK_CONFIGDRIVE = 6
    _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
            DISK_CONFIGDRIVE)

    # String forms, kept in the same order as _ids so zip() pairs them up.
    KERNEL_STR = "kernel"
    RAMDISK_STR = "ramdisk"
    DISK_STR = "root"
    DISK_RAW_STR = "os_raw"
    DISK_VHD_STR = "vhd"
    DISK_ISO_STR = "iso"
    DISK_CONFIGDRIVE_STR = "configdrive"
    _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
             DISK_ISO_STR, DISK_CONFIGDRIVE_STR)

    @classmethod
    def to_string(cls, image_type):
        # Translate a numeric id into its string form (None if unknown).
        return dict(zip(ImageType._ids, ImageType._strs)).get(image_type)

    @classmethod
    def get_role(cls, image_type_id):
        """Get the role played by the image, based on its type."""
        return {
            cls.KERNEL: 'kernel',
            cls.RAMDISK: 'ramdisk',
            cls.DISK: 'root',
            cls.DISK_RAW: 'root',
            cls.DISK_VHD: 'root',
            cls.DISK_ISO: 'iso',
            cls.DISK_CONFIGDRIVE: 'configdrive'
        }.get(image_type_id)
def create_vm(session, instance, name_label, kernel, ramdisk,
              use_pv_kernel=False):
    """Create a VM record. Returns new VM reference.

    the use_pv_kernel flag indicates whether the guest is HVM or PV

    There are 3 scenarios:

        1. Using paravirtualization, kernel passed in
        2. Using paravirtualization, kernel within the image
        3. Using hardware virtualization
    """
    instance_type = instance_types.extract_instance_type(instance)
    # XenAPI expects memory values in bytes, as strings.
    mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
    vcpus = str(instance_type['vcpus'])
    # Base VM record; all four memory fields are pinned to the same value
    # (no dynamic ballooning).
    rec = {
        'actions_after_crash': 'destroy',
        'actions_after_reboot': 'restart',
        'actions_after_shutdown': 'destroy',
        'affinity': '',
        'blocked_operations': {},
        'ha_always_run': False,
        'ha_restart_priority': '',
        'HVM_boot_params': {},
        'HVM_boot_policy': '',
        'is_a_template': False,
        'memory_dynamic_min': mem,
        'memory_dynamic_max': mem,
        'memory_static_min': '0',
        'memory_static_max': mem,
        'memory_target': mem,
        'name_description': '',
        'name_label': name_label,
        'other_config': {'nova_uuid': str(instance['uuid'])},
        'PCI_bus': '',
        'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
                     'viridian': 'true', 'timeoffset': '0'},
        'PV_args': '',
        'PV_bootloader': '',
        'PV_bootloader_args': '',
        'PV_kernel': '',
        'PV_legacy_args': '',
        'PV_ramdisk': '',
        'recommendations': '',
        'tags': [],
        'user_version': '0',
        'VCPUs_at_startup': vcpus,
        'VCPUs_max': vcpus,
        'VCPUs_params': {},
        'xenstore_data': {'allowvssprovider': 'false'}}
    # Complete VM configuration record according to the image type
    # non-raw/raw with PV kernel/raw in HVM mode
    if use_pv_kernel:
        rec['platform']['nx'] = 'false'
        if instance['kernel_id']:
            # 1. Kernel explicitly passed in, use that
            rec['PV_args'] = 'root=/dev/xvda1'
            rec['PV_kernel'] = kernel
            rec['PV_ramdisk'] = ramdisk
        else:
            # 2. Use kernel within the image
            rec['PV_bootloader'] = 'pygrub'
    else:
        # 3. Using hardware virtualization
        rec['platform']['nx'] = 'true'
        rec['HVM_boot_params'] = {'order': 'dc'}
        rec['HVM_boot_policy'] = 'BIOS order'
    vm_ref = session.call_xenapi('VM.create', rec)
    LOG.debug(_('Created VM'), instance=instance)
    return vm_ref
def destroy_vm(session, instance, vm_ref):
    """Destroys a VM record.

    Best-effort: a XenAPI failure is logged and swallowed rather than
    propagated to the caller.
    """
    try:
        session.call_xenapi('VM.destroy', vm_ref)
    except session.XenAPI.Failure, exc:
        LOG.exception(exc)
        return
    LOG.debug(_("VM destroyed"), instance=instance)
def clean_shutdown_vm(session, instance, vm_ref):
    """Attempt a clean (guest-cooperative) shutdown of the VM.

    Returns True if the shutdown was issued, False if the VM was already
    halted or the XenAPI call failed.
    """
    if _is_vm_shutdown(session, vm_ref):
        LOG.warn(_("VM already halted, skipping shutdown..."),
                 instance=instance)
        return False

    LOG.debug(_("Shutting down VM (cleanly)"), instance=instance)
    try:
        session.call_xenapi('VM.clean_shutdown', vm_ref)
    except session.XenAPI.Failure, exc:
        LOG.exception(exc)
        return False
    return True
def hard_shutdown_vm(session, instance, vm_ref):
    """Force-power-off the VM without guest cooperation.

    Returns True if the shutdown was issued, False if the VM was already
    halted or the XenAPI call failed.
    """
    if _is_vm_shutdown(session, vm_ref):
        LOG.warn(_("VM already halted, skipping shutdown..."),
                 instance=instance)
        return False

    LOG.debug(_("Shutting down VM (hard)"), instance=instance)
    try:
        session.call_xenapi('VM.hard_shutdown', vm_ref)
    except session.XenAPI.Failure, exc:
        LOG.exception(exc)
        return False
    return True
def _is_vm_shutdown(session, vm_ref):
    """Return True when the VM's power state maps to SHUTDOWN."""
    vm_rec = session.call_xenapi("VM.get_record", vm_ref)
    return compile_info(vm_rec)['state'] == power_state.SHUTDOWN
def ensure_free_mem(session, instance):
    """Return True if the host has enough free memory for the instance."""
    instance_type = instance_types.extract_instance_type(instance)
    # Instance type stores MB; the XenAPI call reports bytes.
    mem = long(instance_type['memory_mb']) * 1024 * 1024
    host = session.get_xenapi_host()
    host_free_mem = long(session.call_xenapi("host.compute_free_memory",
                                             host))
    return host_free_mem >= mem
def find_vbd_by_number(session, vm_ref, number):
    """Get the VBD reference from the device number.

    Raises volume_utils.StorageError if no VBD with that userdevice
    number is found on the VM.
    """
    vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
    if vbd_refs:
        for vbd_ref in vbd_refs:
            try:
                vbd_rec = session.call_xenapi("VBD.get_record", vbd_ref)
                if vbd_rec['userdevice'] == str(number):
                    return vbd_ref
            except session.XenAPI.Failure, exc:
                # A VBD may disappear between listing and lookup; log and
                # keep scanning the rest.
                LOG.exception(exc)
    raise volume_utils.StorageError(
            _('VBD not found in instance %s') % vm_ref)
def unplug_vbd(session, vbd_ref):
    """Unplug VBD from VM, retrying on DEVICE_DETACH_REJECTED.

    Raises StorageError on any other XenAPI failure, or when the retry
    budget (CONF.xenapi_num_vbd_unplug_retries) is exhausted.
    """
    # Call VBD.unplug on the given VBD, with a retry if we get
    # DEVICE_DETACH_REJECTED. For reasons which we don't understand,
    # we're seeing the device still in use, even when all processes
    # using the device should be dead.
    max_attempts = CONF.xenapi_num_vbd_unplug_retries + 1
    for num_attempt in xrange(1, max_attempts + 1):
        try:
            session.call_xenapi('VBD.unplug', vbd_ref)
            return
        except session.XenAPI.Failure, exc:
            # details[0] carries the XenAPI error code (may be False
            # when details is empty).
            err = len(exc.details) > 0 and exc.details[0]
            if err == 'DEVICE_ALREADY_DETACHED':
                LOG.info(_('VBD %s already detached'), vbd_ref)
                return
            elif err == 'DEVICE_DETACH_REJECTED':
                LOG.info(_('VBD %(vbd_ref)s detach rejected, attempt'
                           ' %(num_attempt)d/%(max_attempts)d'), locals())
            else:
                LOG.exception(exc)
                raise volume_utils.StorageError(
                        _('Unable to unplug VBD %s') % vbd_ref)
            # Rejected: back off briefly before the next attempt.
            greenthread.sleep(1)
    raise volume_utils.StorageError(
            _('Reached maximum number of retries trying to unplug VBD %s')
            % vbd_ref)
def destroy_vbd(session, vbd_ref):
    """Destroy VBD from host database.

    Raises StorageError (after logging) if the XenAPI call fails.
    """
    try:
        session.call_xenapi('VBD.destroy', vbd_ref)
    except session.XenAPI.Failure, exc:
        LOG.exception(exc)
        raise volume_utils.StorageError(
                _('Unable to destroy VBD %s') % vbd_ref)
def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
               read_only=False, bootable=False, osvol=False,
               empty=False, unpluggable=True):
    """Create a VBD record and return its reference.

    :param vdi_ref: VDI to attach, or None for an empty drive (mapped to
                    xapi's 'OpaqueRef:NULL' sentinel).
    :param userdevice: device number inside the guest (stringified).
    :param osvol: when True, mark the VBD in other-config as an attached
                  nova/cinder volume.
    """
    vbd_rec = {}
    vbd_rec['VM'] = vm_ref
    # Fix: identity comparison with None (was `== None`).
    if vdi_ref is None:
        vdi_ref = 'OpaqueRef:NULL'
    vbd_rec['VDI'] = vdi_ref
    vbd_rec['userdevice'] = str(userdevice)
    vbd_rec['bootable'] = bootable
    vbd_rec['mode'] = read_only and 'RO' or 'RW'
    vbd_rec['type'] = vbd_type
    vbd_rec['unpluggable'] = unpluggable
    vbd_rec['empty'] = empty
    vbd_rec['other_config'] = {}
    vbd_rec['qos_algorithm_type'] = ''
    vbd_rec['qos_algorithm_params'] = {}
    vbd_rec['qos_supported_algorithms'] = []
    LOG.debug(_('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
                ' VDI %(vdi_ref)s ... '), locals())
    vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
    LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
                ' VDI %(vdi_ref)s.'), locals())
    if osvol:
        # set osvol=True in other-config to indicate this is an
        # attached nova (or cinder) volume
        session.call_xenapi("VBD.add_to_other_config",
                            vbd_ref, 'osvol', "True")
    return vbd_ref
def attach_cd(session, vm_ref, vdi_ref, userdevice):
    """Create an empty VBD, then insert the CD.

    CD drives must be created empty and have the media inserted with a
    separate VBD.insert call.
    """
    vbd_ref = create_vbd(session, vm_ref, None, userdevice,
                         vbd_type='cd', read_only=True,
                         bootable=True, empty=True,
                         unpluggable=False)
    session.call_xenapi('VBD.insert', vbd_ref, vdi_ref)
    return vbd_ref
def destroy_vdi(session, vdi_ref):
    """Destroy the given VDI, raising StorageError on XenAPI failure."""
    try:
        session.call_xenapi('VDI.destroy', vdi_ref)
    except session.XenAPI.Failure, exc:
        LOG.exception(exc)
        raise volume_utils.StorageError(
                _('Unable to destroy VDI %s') % vdi_ref)
def safe_destroy_vdis(session, vdi_refs):
    """Destroys the requested VDIs, logging any StorageError exceptions."""
    for vdi_ref in vdi_refs:
        try:
            destroy_vdi(session, vdi_ref)
        except volume_utils.StorageError as exc:
            # Best-effort: one failed destroy must not abort the rest.
            LOG.error(exc)
def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
               read_only=False):
    """Create a VDI record and returns its reference.

    `instance` may be None (e.g. when creating a bare volume); when
    present, its uuid is recorded in the VDI's other_config so the disk
    can be traced back to the instance.
    """
    # create_vdi may be called simply while creating a volume
    # hence information about instance may or may not be present
    otherconf = {'nova_disk_type': disk_type}
    if instance:
        otherconf['nova_instance_uuid'] = instance['uuid']
    vdi_ref = session.call_xenapi("VDI.create",
         {'name_label': name_label,
          'name_description': disk_type,
          'SR': sr_ref,
          'virtual_size': str(virtual_size),
          'type': 'User',
          'sharable': False,
          'read_only': read_only,
          'xenstore_data': {},
          'other_config': otherconf,
          'sm_config': {},
          'tags': []})
    LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
                ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.'),
              locals())
    return vdi_ref
def get_vdi_uuid_for_volume(session, connection_data):
    """Return the uuid of the VDI backing a volume connection.

    Introduces the SR when it is not yet known to this host. If the
    connection data does not already carry a `vdi_uuid`, the VDI is
    introduced into the SR; on StorageError the SR is forgotten again
    and None is returned.
    """
    sr_uuid, label, sr_params = volume_utils.parse_sr_info(connection_data)
    sr_ref = volume_utils.find_sr_by_uuid(session, sr_uuid)
    if not sr_ref:
        sr_ref = volume_utils.introduce_sr(session, sr_uuid, label, sr_params)
    if sr_ref is None:
        raise exception.NovaException(_('SR not present and could not be '
                                        'introduced'))
    vdi_uuid = None
    if 'vdi_uuid' in connection_data:
        # Re-scan so the pre-known VDI becomes visible in the SR.
        session.call_xenapi("SR.scan", sr_ref)
        vdi_uuid = connection_data['vdi_uuid']
    else:
        try:
            vdi_ref = volume_utils.introduce_vdi(session, sr_ref)
            vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
            vdi_uuid = vdi_rec['uuid']
        except volume_utils.StorageError, exc:
            LOG.exception(exc)
            volume_utils.forget_sr(session, sr_ref)
    return vdi_uuid
def get_vdis_for_instance(context, session, instance, name_label, image,
                          image_type, block_device_info=None):
    """Assemble the dict of VDIs for an instance.

    Volume-backed disks listed in `block_device_info` are introduced
    first (the root device keyed as 'root', others keyed by their mount
    device); if no volume supplies the root disk, it is created from the
    Glance image.

    Returns a dict of {type: {uuid, file, [osvol]}} entries.
    """
    vdis = {}
    if block_device_info:
        LOG.debug(_("block device info: %s"), block_device_info)
        root_device_name = block_device_info['root_device_name']
        for bdm in block_device_info['block_device_mapping']:
            if (block_device.strip_prefix(bdm['mount_device']) ==
                block_device.strip_prefix(root_device_name)):
                # If we're a root-device, record that fact so we don't download
                # a root image via Glance
                type_ = 'root'
            else:
                # Otherwise, use mount_device as `type_` so that we have easy
                # access to it in _attach_disks to create the VBD
                type_ = bdm['mount_device']
            connection_data = bdm['connection_info']['data']
            vdi_uuid = get_vdi_uuid_for_volume(session, connection_data)
            if vdi_uuid:
                vdis[type_] = dict(uuid=vdi_uuid, file=None, osvol=True)
    # If we didn't get a root VDI from volumes, then use the Glance image as
    # the root device
    if 'root' not in vdis:
        create_image_vdis = _create_image(
            context, session, instance, name_label, image, image_type)
        vdis.update(create_image_vdis)
    return vdis
@contextlib.contextmanager
def _dummy_vm(session, instance, vdi_ref):
    """This creates a temporary VM so that we can snapshot a VDI.

    VDI's can't be snapshotted directly since the API expects a `vm_ref`. To
    work around this, we need to create a temporary VM and then map the VDI to
    the VM using a temporary VBD.

    Both the VBD and the VM are destroyed when the context exits, even
    on error.
    """
    name_label = "dummy"
    vm_ref = create_vm(session, instance, name_label, None, None)
    try:
        vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
                             read_only=True)
        try:
            yield vm_ref
        finally:
            try:
                destroy_vbd(session, vbd_ref)
            except volume_utils.StorageError:
                # destroy_vbd() will log error
                pass
    finally:
        destroy_vm(session, instance, vm_ref)
def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
    """Copy a VDI and return the new VDIs reference.

    This function differs from the XenAPI `VDI.copy` call in that the copy is
    atomic and isolated, meaning we don't see half-downloaded images. It
    accomplishes this by copying the VDI's into a temporary directory and then
    atomically renaming them into the SR when the copy is completed.

    The correct long term solution is to fix `VDI.copy` so that it is atomic
    and isolated.
    """
    # Snapshotting requires a vm_ref, so attach the VDI to a throwaway VM.
    with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
        label = "snapshot"
        with snapshot_attached_here(
                session, instance, vm_ref, label) as vdi_uuids:
            imported_vhds = session.call_plugin_serialized(
                'workarounds', 'safe_copy_vdis', sr_path=get_sr_path(session),
                vdi_uuids=vdi_uuids, uuid_stack=_make_uuid_stack())
    root_uuid = imported_vhds['root']['uuid']
    # TODO(sirp): for safety, we should probably re-scan the SR after every
    # call to a dom0 plugin, since there is a possibility that the underlying
    # VHDs changed
    scan_default_sr(session)
    vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
    return vdi_ref
def _clone_vdi(session, vdi_to_clone_ref):
    """Clone a VDI (copy-on-write) and return the new VDI's reference."""
    vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
    LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI '
                '%(vdi_to_clone_ref)s') % locals())
    return vdi_ref
def set_vdi_name(session, vdi_uuid, label, description, vdi_ref=None):
    """Set a VDI's name label and description.

    The VDI reference is resolved from `vdi_uuid` unless a truthy
    `vdi_ref` is supplied by the caller.
    """
    if not vdi_ref:
        vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
    session.call_xenapi('VDI.set_name_label', vdi_ref, label)
    session.call_xenapi('VDI.set_name_description', vdi_ref, description)
def get_vdi_for_vm_safely(session, vm_ref):
    """Retrieves the primary VDI for a VM.

    Returns a (vdi_ref, vdi_rec) tuple; raises NovaException when the VM
    has no VBD with userdevice 0.
    """
    vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
    for vbd in vbd_refs:
        vbd_rec = session.call_xenapi("VBD.get_record", vbd)
        # Convention dictates the primary VDI will be userdevice 0
        if vbd_rec['userdevice'] == '0':
            vdi_rec = session.call_xenapi("VDI.get_record", vbd_rec['VDI'])
            return vbd_rec['VDI'], vdi_rec
    raise exception.NovaException(_("No primary VDI found for %(vm_ref)s")
                                  % locals())
@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label, *args):
    """Snapshot the root disk only and yield the list of uuids for the
    vhds in the chain.

    An optional single positional argument may be supplied: a callable
    ``update_task_state`` invoked with
    ``task_state=task_states.IMAGE_PENDING_UPLOAD`` once the snapshot
    has been taken.  The snapshot VDI is destroyed when the context
    exits.
    """
    # Fix: the docstring used to sit *after* the first statements, making
    # it a dead string literal; it is now a real docstring.
    update_task_state = None
    if len(args) == 1:
        update_task_state = args[0]

    LOG.debug(_("Starting snapshot for VM"), instance=instance)

    # Memorize the original_parent_uuid so we can poll for coalesce
    vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
    original_parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref)
    sr_ref = vm_vdi_rec["SR"]

    snapshot_ref = session.call_xenapi("VDI.snapshot", vm_vdi_ref, {})
    if update_task_state is not None:
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
    try:
        snapshot_rec = session.call_xenapi("VDI.get_record", snapshot_ref)
        # Wait for any in-flight VHD coalesce so the chain is stable
        # before we walk it.
        _wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
                               original_parent_uuid)
        vdi_uuids = [vdi_rec['uuid'] for vdi_rec in
                     _walk_vdi_chain(session, snapshot_rec['uuid'])]
        yield vdi_uuids
    finally:
        safe_destroy_vdis(session, [snapshot_ref])
def get_sr_path(session):
    """Return the path to our storage repository

    This is used when we're dealing with VHDs directly, either by taking
    snapshots or by restoring an image in the DISK_VHD format.
    """
    sr_ref = safe_find_sr(session)
    sr_rec = session.call_xenapi("SR.get_record", sr_ref)
    sr_uuid = sr_rec["uuid"]
    # SRs live under the configured base path, named by their uuid.
    return os.path.join(CONF.xenapi_sr_base_path, sr_uuid)
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
    """Destroy used or unused cached images.

    A cached image that is being used by at least one VM is said to be 'used'.

    In the case of an 'unused' image, the cached image will be the only
    descendent of the base-copy. So when we delete the cached-image, the
    refcount will drop to zero and XenServer will automatically destroy the
    base-copy for us.

    The default behavior of this function is to destroy only 'unused' cached
    images. To destroy all cached images, use the `all_cached=True` kwarg.
    With `dry_run=True` nothing is destroyed, but the set of uuids that
    would have been destroyed is still returned.
    """
    cached_images = _find_cached_images(session, sr_ref)
    destroyed = set()

    def destroy_cached_vdi(vdi_uuid, vdi_ref):
        # Fix: the interpolation args were missing, so the literal
        # '%(vdi_uuid)s' was logged instead of the uuid.
        LOG.debug(_("Destroying cached VDI '%(vdi_uuid)s'"), locals())
        if not dry_run:
            destroy_vdi(session, vdi_ref)
        destroyed.add(vdi_uuid)

    for vdi_ref in cached_images.values():
        vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)

        if all_cached:
            destroy_cached_vdi(vdi_uuid, vdi_ref)
            continue

        # Unused-Only: Search for siblings

        # Chain length greater than two implies a VM must be holding a ref to
        # the base-copy (otherwise it would have coalesced), so consider this
        # cached image used.
        chain = list(_walk_vdi_chain(session, vdi_uuid))
        if len(chain) > 2:
            continue
        elif len(chain) == 2:
            # Siblings imply cached image is used
            root_vdi_rec = chain[-1]
            children = _child_vhds(session, sr_ref, root_vdi_rec['uuid'])
            if len(children) > 1:
                continue

        destroy_cached_vdi(vdi_uuid, vdi_ref)

    return destroyed
def _find_cached_images(session, sr_ref):
    """Return a dict mapping image-id to vdi_ref for all cached images.

    A VDI counts as a cached image when its other_config carries an
    'image-id' entry.
    """
    cached_images = {}
    for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
        try:
            image_id = vdi_rec['other_config']['image-id']
        except KeyError:
            continue
        cached_images[image_id] = vdi_ref
    return cached_images
def _find_cached_image(session, image_id, sr_ref):
    """Returns the vdi-ref of the cached image, or None if not cached."""
    cached_images = _find_cached_images(session, sr_ref)
    return cached_images.get(image_id)
def resize_disk(session, instance, vdi_ref, instance_type):
    """Shrink a root disk to instance_type['root_gb'] GB.

    Works on a throwaway copy of the VDI (destroyed in all cases): the
    partition/filesystem is resized down on the copy, then its contents
    are moved into a freshly created VDI of the target size.

    Returns (new_vdi_ref, new_vdi_uuid).
    """
    # Copy VDI over to something we can resize
    # NOTE(jerdfelt): Would be nice to just set vdi_ref to read/write
    sr_ref = safe_find_sr(session)
    copy_ref = session.call_xenapi('VDI.copy', vdi_ref, sr_ref)

    try:
        # Resize partition and filesystem down
        auto_configure_disk(session, copy_ref, instance_type['root_gb'])

        # Create new VDI
        vdi_size = instance_type['root_gb'] * 1024 * 1024 * 1024
        # NOTE(johannes): No resizing allowed for rescue instances, so
        # using instance['name'] is safe here
        new_ref = create_vdi(session, sr_ref, instance, instance['name'],
                             'root', vdi_size)

        new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)

        # Manually copy contents over
        virtual_size = instance_type['root_gb'] * 1024 * 1024 * 1024
        _copy_partition(session, copy_ref, new_ref, 1, virtual_size)

        return new_ref, new_uuid
    finally:
        destroy_vdi(session, copy_ref)
def auto_configure_disk(session, vdi_ref, new_gb):
    """Partition and resize FS to match the size specified by
    instance_types.root_gb.

    This is a fail-safe to prevent accidentally destroying data on a disk
    erroneously marked as auto_disk_config=True.

    The criteria for allowing resize are:

        1. 'auto_disk_config' must be true for the instance (and image).
           (If we've made it here, then auto_disk_config=True.)

        2. The disk must have only one partition.

        3. The file-system on the one partition must be ext3 or ext4.

    Silently does nothing when the criteria are not met.
    """
    with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
        partitions = _get_partitions(dev)
        if len(partitions) != 1:
            return
        _num, start, old_sectors, ptype = partitions[0]
        if ptype in ('ext3', 'ext4'):
            # Python 2 integer division: whole sectors only.
            new_sectors = new_gb * 1024 * 1024 * 1024 / SECTOR_SIZE
            _resize_part_and_fs(dev, start, old_sectors, new_sectors)
def _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   disk_type, size_mb, fs_type):
    """
    Steps to programmatically generate a disk:

        1. Create VDI of desired size

        2. Attach VDI to compute worker

        3. Create partition

        4. Create VBD between instance VM and VDI

    `fs_type` of 'linux-swap' gets mkswap, any other non-None value is
    passed to mkfs -t, and None leaves the partition unformatted.
    The VDI is destroyed if anything fails after creation.
    """
    # 1. Create VDI
    sr_ref = safe_find_sr(session)
    ONE_MEG = 1024 * 1024
    virtual_size = size_mb * ONE_MEG
    vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
                         virtual_size)
    try:
        # 2. Attach VDI to compute worker (VBD hotplug)
        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
            # 3. Create partition
            dev_path = utils.make_dev_path(dev)
            utils.execute('parted', '--script', dev_path,
                          'mklabel', 'msdos', run_as_root=True)
            partition_start = 0
            partition_end = size_mb
            utils.execute('parted', '--script', dev_path,
                          'mkpart', 'primary',
                          str(partition_start),
                          str(partition_end),
                          run_as_root=True)
            partition_path = utils.make_dev_path(dev, partition=1)
            if fs_type == 'linux-swap':
                utils.execute('mkswap', partition_path, run_as_root=True)
            elif fs_type is not None:
                utils.execute('mkfs', '-t', fs_type, partition_path,
                              run_as_root=True)
        # 4. Create VBD between instance VM and swap VDI
        create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
    except Exception:
        # Clean up the orphaned VDI before re-raising.
        with excutils.save_and_reraise_exception():
            destroy_vdi(session, vdi_ref)
def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
    """Create and attach a swap disk for the instance.

    NOTE(jk0): Windows guests get a FAT32 ("vfat") filesystem for the
    swap partition because that is what parted supports; everything
    else gets linux-swap.
    """
    fs_type = "vfat" if instance['os_type'] == "windows" else "linux-swap"
    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'swap', swap_mb, fs_type)
def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
                       size_gb):
    """Create and attach an ephemeral disk of `size_gb` GB formatted
    with the configured default ephemeral filesystem.
    """
    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'ephemeral', size_gb * 1024,
                   CONF.default_ephemeral_format)
def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice,
                                 name_label, size_gb):
    """Create a blank, formatted root disk for an ISO-booted instance."""
    _generate_disk(session, instance, vm_ref, userdevice, name_label,
                   'user', size_gb * 1024, CONF.default_ephemeral_format)
def generate_configdrive(session, instance, vm_ref, userdevice,
                         admin_password=None, files=None):
    """Build a config-drive image and attach it to the VM read-only.

    The drive is built locally with ConfigDriveBuilder, then dd'd onto a
    freshly created VDI; the VDI is destroyed if anything fails.
    """
    sr_ref = safe_find_sr(session)
    vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
                         'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
    try:
        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
            extra_md = {}
            if admin_password:
                extra_md['admin_pass'] = admin_password
            inst_md = instance_metadata.InstanceMetadata(instance,
                                                         content=files,
                                                         extra_md=extra_md)
            with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
                with utils.tempdir() as tmp_path:
                    tmp_file = os.path.join(tmp_path, 'configdrive')
                    cdb.make_drive(tmp_file)
                    # Raw-copy the built image onto the attached device.
                    dev_path = utils.make_dev_path(dev)
                    utils.execute('dd',
                                  'if=%s' % tmp_file,
                                  'of=%s' % dev_path,
                                  run_as_root=True)
        create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
                   read_only=True)
    except Exception:
        # Clean up the orphaned VDI before re-raising.
        with excutils.save_and_reraise_exception():
            destroy_vdi(session, vdi_ref)
def create_kernel_image(context, session, instance, name_label, image_id,
                        image_type):
    """Creates kernel/ramdisk file from the image stored in the cache.
    If the image is not present in the cache, it streams it from glance.

    Returns: A list of dictionaries that describe VDIs
    """
    filename = ""
    if CONF.cache_images:
        args = {}
        args['cached-image'] = image_id
        args['new-image-uuid'] = str(uuid.uuid4())
        # The dom0 plugin returns "" when the image is not in its cache.
        filename = session.call_plugin('kernel', 'create_kernel_ramdisk', args)
    if filename == "":
        return _fetch_disk_image(context, session, instance, name_label,
                                 image_id, image_type)
    else:
        vdi_type = ImageType.to_string(image_type)
        # Cached kernels/ramdisks are plain files, not VDIs.
        return {vdi_type: dict(uuid=None, file=filename)}
def destroy_kernel_ramdisk(session, kernel, ramdisk):
    """Ask the dom0 kernel plugin to remove kernel and/or ramdisk files.

    Does nothing when neither path is supplied.
    """
    files = {}
    for key, value in (('kernel-file', kernel), ('ramdisk-file', ramdisk)):
        if value:
            files[key] = value
    if files:
        session.call_plugin('kernel', 'remove_kernel_ramdisk', files)
def _create_cached_image(context, session, instance, name_label,
                         image_id, image_type):
    """Fetch an image via the SR-level cache and return its VDIs.

    The cached base copy is fetched from Glance on a cache miss and
    tagged with 'image-id' in other-config; the instance's disk is then
    a clone (ext SR + cow) or a safe copy of that base.
    """
    sr_ref = safe_find_sr(session)
    sr_type = session.call_xenapi('SR.get_record', sr_ref)["type"]
    vdis = {}

    if CONF.use_cow_images and sr_type != "ext":
        LOG.warning(_("Fast cloning is only supported on default local SR "
                      "of type ext. SR on this system was found to be of "
                      "type %(sr_type)s. Ignoring the cow flag.")
                    % locals())

    root_vdi_ref = _find_cached_image(session, image_id, sr_ref)
    if root_vdi_ref is None:
        # Cache miss: fetch from Glance and label/tag the base copy.
        vdis = _fetch_image(context, session, instance, name_label,
                            image_id, image_type)
        root_vdi = vdis['root']
        root_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
                                           root_vdi['uuid'])
        set_vdi_name(session, root_vdi['uuid'], 'Glance Image %s' % image_id,
                     'root', vdi_ref=root_vdi_ref)
        session.call_xenapi('VDI.add_to_other_config',
                            root_vdi_ref, 'image-id', str(image_id))

    if CONF.use_cow_images and sr_type == 'ext':
        new_vdi_ref = _clone_vdi(session, root_vdi_ref)
    else:
        new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, root_vdi_ref)

    # Set the name label for the image we just created and remove image id
    # field from other-config.
    session.call_xenapi('VDI.remove_from_other_config',
                        new_vdi_ref, 'image-id')

    vdi_type = ImageType.get_role(image_type)
    vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
    vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
    return vdis
def _create_image(context, session, instance, name_label, image_id,
                  image_type):
    """Creates VDI from the image stored in the local cache. If the image
    is not present in the cache, it streams it from glance.

    Caching is decided by CONF.cache_images ('all' / 'some' / 'none');
    ISO images are never cached.

    Returns: A list of dictionaries that describe VDIs
    """
    cache_images = CONF.cache_images.lower()

    # Deterimine if the image is cacheable
    if image_type == ImageType.DISK_ISO:
        cache = False
    elif cache_images == 'all':
        cache = True
    elif cache_images == 'some':
        # 'some': per-image opt-in via system metadata.
        sys_meta = utils.metadata_to_dict(instance['system_metadata'])
        try:
            cache = utils.bool_from_str(sys_meta['image_cache_in_nova'])
        except KeyError:
            cache = False
    elif cache_images == 'none':
        cache = False
    else:
        LOG.warning(_("Unrecognized cache_images value '%s', defaulting to"
                      " True"), CONF.cache_images)
        cache = True

    # Fetch (and cache) the image
    if cache:
        vdis = _create_cached_image(context, session, instance, name_label,
                                    image_id, image_type)
    else:
        vdis = _fetch_image(context, session, instance, name_label,
                            image_id, image_type)

    # Set the name label and description to easily identify what
    # instance and disk it's for
    for vdi_type, vdi in vdis.iteritems():
        set_vdi_name(session, vdi['uuid'], name_label, vdi_type)

    return vdis
def _fetch_image(context, session, instance, name_label, image_id, image_type):
    """Fetch image from glance based on image type.

    Returns: A single filename if image_type is KERNEL or RAMDISK
             A list of dictionaries that describe VDIs, otherwise
    """
    # VHD images go through the dom0 plugin; everything else is
    # streamed through domU.
    if image_type == ImageType.DISK_VHD:
        vdis = _fetch_vhd_image(context, session, instance, image_id)
    else:
        vdis = _fetch_disk_image(context, session, instance, name_label,
                                 image_id, image_type)

    for vdi_type, vdi in vdis.iteritems():
        vdi_uuid = vdi['uuid']
        LOG.debug(_("Fetched VDIs of type '%(vdi_type)s' with UUID"
                    " '%(vdi_uuid)s'"),
                  locals(), instance=instance)

    return vdis
def _fetch_using_dom0_plugin_with_retry(context, session, image_id,
                                        plugin_name, params, callback=None):
    """Call a dom0 plugin's download_vhd with retry and backoff.

    Only 'RetryableError' plugin failures are retried (with exponential
    backoff capped at 15s); other failures propagate. Raises
    CouldNotFetchImage after CONF.glance_num_retries + 1 attempts.
    `callback`, if given, may mutate `params` before each attempt.
    """
    max_attempts = CONF.glance_num_retries + 1
    sleep_time = 0.5
    for attempt_num in xrange(1, max_attempts + 1):
        LOG.info(_('download_vhd %(image_id)s, '
                   'attempt %(attempt_num)d/%(max_attempts)d, '
                   'params: %(params)s') % locals())

        try:
            if callback:
                callback(params)

            return session.call_plugin_serialized(
                    plugin_name, 'download_vhd', **params)
        except session.XenAPI.Failure as exc:
            _type, _method, error = exc.details[:3]
            if error == 'RetryableError':
                LOG.error(_('download_vhd failed: %r') %
                          (exc.details[3:],))
            else:
                raise

        time.sleep(sleep_time)
        sleep_time = min(2 * sleep_time, 15)

    raise exception.CouldNotFetchImage(image_id=image_id)
def _make_uuid_stack():
    """Pre-generate a stack of uuid strings for the dom0 plugins.

    NOTE(sirp): The XenAPI plugins run under Python 2.4, which does not
    have the `uuid` module, so the uuids are generated here (under
    Python 2.6+) and passed down as arguments.
    """
    return [str(uuid.uuid4()) for _ in xrange(MAX_VDI_CHAIN_SIZE)]
def _image_uses_bittorrent(context, instance):
    """Return True when this instance's image should be fetched via
    bittorrent, per CONF.xenapi_torrent_images ('all'/'some'/'none').

    'some' consults the per-image 'image_bittorrent' system-metadata
    flag; an unrecognized setting logs a warning and disables torrents.
    """
    bittorrent = False
    xenapi_torrent_images = CONF.xenapi_torrent_images.lower()

    if xenapi_torrent_images == 'all':
        bittorrent = True
    elif xenapi_torrent_images == 'some':
        sys_meta = utils.metadata_to_dict(instance['system_metadata'])
        try:
            bittorrent = utils.bool_from_str(sys_meta['image_bittorrent'])
        except KeyError:
            pass
    elif xenapi_torrent_images == 'none':
        pass
    else:
        LOG.warning(_("Invalid value '%s' for xenapi_torrent_images"),
                    xenapi_torrent_images)

    return bittorrent
def _fetch_vhd_image(context, session, instance, image_id):
    """Tell glance to download an image and put the VHDs into the SR

    The download is performed in dom0, either by the bittorrent plugin
    or the glance plugin (chosen per _image_uses_bittorrent). The SR is
    re-scanned afterwards and the resulting root VDI is size-checked
    against the instance type.

    Returns: A list of dictionaries that describe VDIs
    """
    LOG.debug(_("Asking xapi to fetch vhd image %(image_id)s"), locals(),
              instance=instance)

    params = {'image_id': image_id,
              'uuid_stack': _make_uuid_stack(),
              'sr_path': get_sr_path(session)}

    if _image_uses_bittorrent(context, instance):
        plugin_name = 'bittorrent'
        callback = None
        params['torrent_base_url'] = CONF.xenapi_torrent_base_url
        params['torrent_seed_duration'] = CONF.xenapi_torrent_seed_duration
        params['torrent_seed_chance'] = CONF.xenapi_torrent_seed_chance
        params['torrent_max_last_accessed'] =\
                CONF.xenapi_torrent_max_last_accessed
        params['torrent_listen_port_start'] =\
                CONF.xenapi_torrent_listen_port_start
        params['torrent_listen_port_end'] =\
                CONF.xenapi_torrent_listen_port_end
        params['torrent_download_stall_cutoff'] =\
                CONF.xenapi_torrent_download_stall_cutoff
        params['torrent_max_seeder_processes_per_host'] =\
                CONF.xenapi_torrent_max_seeder_processes_per_host
    else:
        plugin_name = 'glance'
        glance_api_servers = glance.get_api_servers()

        def pick_glance(params):
            # Rotate through the configured glance servers on each retry.
            g_host, g_port, g_use_ssl = glance_api_servers.next()
            params['glance_host'] = g_host
            params['glance_port'] = g_port
            params['glance_use_ssl'] = g_use_ssl
            params['auth_token'] = getattr(context, 'auth_token', None)

        callback = pick_glance

    vdis = _fetch_using_dom0_plugin_with_retry(
            context, session, image_id, plugin_name, params,
            callback=callback)

    sr_ref = safe_find_sr(session)
    _scan_sr(session, sr_ref)

    # Pull out the UUID of the root VDI
    root_vdi_uuid = vdis['root']['uuid']

    # Set the name-label to ease debugging
    set_vdi_name(session, root_vdi_uuid, instance['name'], 'root')

    _check_vdi_size(context, session, instance, root_vdi_uuid)
    return vdis
def _get_vdi_chain_size(session, vdi_uuid):
    """Compute the total size of a VDI chain, starting with the specified
    VDI UUID.

    This will walk the VDI chain to the root, add the size of each VDI into
    the total.  Sizes are taken from each VDI's physical_utilisation.
    """
    size_bytes = 0
    for vdi_rec in _walk_vdi_chain(session, vdi_uuid):
        cur_vdi_uuid = vdi_rec['uuid']
        vdi_size_bytes = int(vdi_rec['physical_utilisation'])
        LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
                    '%(vdi_size_bytes)d'), locals())
        size_bytes += vdi_size_bytes
    return size_bytes
def _check_vdi_size(context, session, instance, vdi_uuid):
    """Raise ImageTooLarge when the VDI chain exceeds the instance
    type's root_gb allowance.
    """
    size_bytes = _get_vdi_chain_size(session, vdi_uuid)

    # FIXME(jk0): this was copied directly from compute.manager.py, let's
    # refactor this to a common area
    instance_type = instance_types.extract_instance_type(instance)
    allowed_size_gb = instance_type['root_gb']
    allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024

    LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes="
                "%(allowed_size_bytes)d"), locals(), instance=instance)

    if size_bytes > allowed_size_bytes:
        LOG.info(_("Image size %(size_bytes)d exceeded instance_type "
                   "allowed size %(allowed_size_bytes)d"),
                 locals(), instance=instance)
        raise exception.ImageTooLarge()
def _fetch_disk_image(context, session, instance, name_label, image_id,
                      image_type):
    """Fetch the image from Glance

    NOTE:
    Unlike _fetch_vhd_image, this method does not use the Glance
    plugin; instead, it streams the disks through domU to the VDI
    directly.

    Returns: A single filename if image_type is KERNEL_RAMDISK
             A list of dictionaries that describe VDIs, otherwise
    """
    # FIXME(sirp): Since the Glance plugin seems to be required for the
    # VHD disk, it may be worth using the plugin for both VHD and RAW and
    # DISK restores
    image_type_str = ImageType.to_string(image_type)
    LOG.debug(_("Fetching image %(image_id)s, type %(image_type_str)s"),
              locals(), instance=instance)

    if image_type == ImageType.DISK_ISO:
        sr_ref = _safe_find_iso_sr(session)
    else:
        sr_ref = safe_find_sr(session)

    image_service, image_id = glance.get_remote_image_service(
            context, image_id)
    meta = image_service.show(context, image_id)
    virtual_size = int(meta['size'])
    vdi_size = virtual_size
    LOG.debug(_("Size for image %(image_id)s: %(virtual_size)d"), locals(),
              instance=instance)
    if image_type == ImageType.DISK:
        # Make room for MBR.
        vdi_size += MBR_SIZE_BYTES
    elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
          vdi_size > CONF.max_kernel_ramdisk_size):
        max_size = CONF.max_kernel_ramdisk_size
        raise exception.NovaException(
            _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
              "max %(max_size)d bytes") % locals())

    vdi_ref = create_vdi(session, sr_ref, instance, name_label,
                         image_type_str, vdi_size)
    # From this point we have a VDI on Xen host;
    # If anything goes wrong, we need to remember its uuid.
    try:
        # Initialized up-front so the except clause below can always
        # reference them when annotating the exception.
        filename = None
        vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)

        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
            stream_func = lambda f: image_service.download(
                    context, image_id, f)
            _stream_disk(stream_func, image_type, virtual_size, dev)

        if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
            # We need to invoke a plugin for copying the
            # content of the VDI into the proper path.
            LOG.debug(_("Copying VDI %s to /boot/guest on dom0"),
                      vdi_ref, instance=instance)

            args = {}
            args['vdi-ref'] = vdi_ref

            # Let the plugin copy the correct number of bytes.
            args['image-size'] = str(vdi_size)
            if CONF.cache_images:
                args['cached-image'] = image_id
            filename = session.call_plugin('kernel', 'copy_vdi', args)

            # Remove the VDI as it is not needed anymore.
            destroy_vdi(session, vdi_ref)
            LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref,
                      instance=instance)
            vdi_role = ImageType.get_role(image_type)
            return {vdi_role: dict(uuid=None, file=filename)}
        else:
            vdi_role = ImageType.get_role(image_type)
            return {vdi_role: dict(uuid=vdi_uuid, file=None)}
    except (session.XenAPI.Failure, IOError, OSError) as e:
        # We look for XenAPI and OS failures.
        LOG.exception(_("Failed to fetch glance image"),
                      instance=instance)
        # Annotate the exception with the orphaned VDI's identity so the
        # caller can clean it up.
        e.args = e.args + ([dict(type=ImageType.to_string(image_type),
                                 uuid=vdi_uuid,
                                 file=filename)],)
        raise
def determine_disk_image_type(image_meta):
    """Disk Image Types are used to determine where the kernel will reside
    within an image. To figure out which type we're dealing with, we use
    the following rules:

    1. If we're using Glance, we can use the image_type field to
       determine the image_type

    2. If we're not using Glance, then we need to deduce this based on
       whether a kernel_id is specified.

    Returns None when no image metadata is supplied; raises
    InvalidDiskFormat for an unknown disk_format.
    """
    if not image_meta:
        return None

    disk_format = image_meta['disk_format']

    # Glance disk_format -> ImageType attribute name.
    disk_format_map = {
        'ami': 'DISK',
        'aki': 'KERNEL',
        'ari': 'RAMDISK',
        'raw': 'DISK_RAW',
        'vhd': 'DISK_VHD',
        'iso': 'DISK_ISO',
    }

    try:
        image_type_str = disk_format_map[disk_format]
    except KeyError:
        raise exception.InvalidDiskFormat(disk_format=disk_format)

    image_type = getattr(ImageType, image_type_str)

    image_ref = image_meta['id']
    msg = _("Detected %(image_type_str)s format for image %(image_ref)s")
    LOG.debug(msg % locals())

    return image_type
def determine_is_pv(session, vdi_ref, disk_image_type, os_type):
    """
    Determine whether the VM will use a paravirtualized kernel or if it
    will use hardware virtualization.

        1. Glance (VHD): then we use `os_type`, raise if not set

        2. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
           available

        3. Glance (DISK): pv is assumed

        4. Glance (DISK_ISO): no pv is assumed

        5. Boot From Volume - without image metadata (None): attempt to
           use Pygrub to figure out if the volume stores a PV VM or a
           HVM one. Log a warning, because there may be cases where the
           volume is RAW (in which case using pygrub is fine) and cases
           where the content of the volume is VHD, and pygrub might not
           work as expected.
           NOTE: if disk_image_type is not specified, instances launched
           from remote volumes will have to include kernel and ramdisk
           because external kernel and ramdisk will not be fetched.
    """
    LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
    if disk_image_type == ImageType.DISK_VHD:
        # 1. VHD
        if os_type == 'windows':
            is_pv = False
        else:
            is_pv = True
    elif disk_image_type == ImageType.DISK_RAW:
        # 2. RAW
        with vdi_attached_here(session, vdi_ref, read_only=True) as dev:
            is_pv = _is_vdi_pv(dev)
    elif disk_image_type == ImageType.DISK:
        # 3. Disk
        is_pv = True
    elif disk_image_type == ImageType.DISK_ISO:
        # 4. ISO
        is_pv = False
    elif not disk_image_type:
        # 5. No metadata: probe the attached disk with pygrub.
        LOG.warning(_("Image format is None: trying to determine PV status "
                      "using pygrub; if instance with vdi %s does not boot "
                      "correctly, try with image metadata.") % vdi_ref)
        with vdi_attached_here(session, vdi_ref, read_only=True) as dev:
            is_pv = _is_vdi_pv(dev)
    else:
        msg = _("Unknown image format %(disk_image_type)s") % locals()
        raise exception.NovaException(msg)

    return is_pv
def set_vm_name_label(session, vm_ref, name_label):
    """Set the XenServer name-label displayed for the VM."""
    session.call_xenapi("VM.set_name_label", vm_ref, name_label)
def list_vms(session):
    """Yield (vm_ref, vm_rec) for every guest VM resident on this host.

    Templates, control domains and VMs resident on other hosts are
    filtered out.
    """
    for vm_ref, vm_rec in session.get_all_refs_and_recs('VM'):
        if (vm_rec["resident_on"] == session.get_xenapi_host() and
                not vm_rec["is_a_template"] and
                not vm_rec["is_control_domain"]):
            yield vm_ref, vm_rec
def lookup_vm_vdis(session, vm_ref):
    """Look for the VDIs that are attached to the VM.

    Attached nova/cinder volumes (VBDs flagged with 'osvol' in
    other-config) are excluded; per-VBD lookup failures are logged and
    skipped.
    """
    # Firstly we get the VBDs, then the VDIs.
    # TODO(Armando): do we leave the read-only devices?
    vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
    vdi_refs = []
    if vbd_refs:
        for vbd_ref in vbd_refs:
            try:
                vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
                # Test valid VDI
                record = session.call_xenapi("VDI.get_record", vdi_ref)
                LOG.debug(_('VDI %s is still available'), record['uuid'])
                vbd_other_config = session.call_xenapi("VBD.get_other_config",
                                                       vbd_ref)
                if not vbd_other_config.get('osvol'):
                    # This is not an attached volume
                    vdi_refs.append(vdi_ref)
            except session.XenAPI.Failure, exc:
                LOG.exception(exc)
    return vdi_refs
def lookup(session, name_label):
    """Return the VM ref carrying name_label, or None when absent.

    Raises InstanceExists when the label is ambiguous (matches more than
    one VM).
    """
    matches = session.call_xenapi("VM.get_by_name_label", name_label)
    if not matches:
        return None
    if len(matches) > 1:
        raise exception.InstanceExists(name=name_label)
    return matches[0]
def preconfigure_instance(session, instance, vdi_ref, network_info):
    """Makes alterations to the image before launching as part of spawn.
    """
    # As mounting the image VDI is expensive, we only want do do it once,
    # if at all, so determine whether it's required first, and then do
    # everything
    mount_required = False
    key, net, metadata = _prepare_injectables(instance, network_info)
    mount_required = key or net or metadata
    if not mount_required:
        return
    # Attach the VDI read/write to this domU and inject the ssh key,
    # network config and metadata into the mounted filesystem.
    with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
        _mounted_processing(dev, key, net, metadata)
def lookup_kernel_ramdisk(session, vm):
    """Return (PV_kernel, PV_ramdisk) from the VM's record.

    Yields (None, None) when the record does not carry both fields.
    """
    rec = session.call_xenapi("VM.get_record", vm)
    has_both = 'PV_kernel' in rec and 'PV_ramdisk' in rec
    if not has_both:
        return (None, None)
    return (rec['PV_kernel'], rec['PV_ramdisk'])
def is_snapshot(session, vm):
    """Return whether the VM record is both a template and a snapshot."""
    rec = session.call_xenapi("VM.get_record", vm)
    if 'is_a_template' not in rec or 'is_a_snapshot' not in rec:
        return False
    return rec['is_a_template'] and rec['is_a_snapshot']
def compile_info(record):
    """Fill record with VM status information."""
    # power_state is translated through the module-level
    # XENAPI_POWER_STATE map; memory_* fields are bytes, so >> 10
    # converts them to KiB (long() because this is Python 2 code).
    return {'state': XENAPI_POWER_STATE[record['power_state']],
            'max_mem': long(record['memory_static_max']) >> 10,
            'mem': long(record['memory_dynamic_max']) >> 10,
            'num_cpu': record['VCPUs_max'],
            'cpu_time': 0}
def compile_diagnostics(record):
    """Compile VM diagnostics data."""
    try:
        keys = []
        diags = {}
        vm_uuid = record["uuid"]
        # Fetch the round-robin database (RRD) XML for this VM from XAPI.
        xml = _get_rrd(_get_rrd_server(), vm_uuid)
        if xml:
            rrd = minidom.parseString(xml)
            for i, node in enumerate(rrd.firstChild.childNodes):
                # Provide the last update of the information
                if node.localName == 'lastupdate':
                    diags['last_update'] = node.firstChild.data
                # Create a list of the diagnostic keys (in their order)
                if node.localName == 'ds':
                    ref = node.childNodes
                    # Name and Value
                    if len(ref) > 6:
                        keys.append(ref[0].firstChild.data)
                # Read the last row of the first RRA to get the latest info
                if node.localName == 'rra':
                    # NOTE(review): the magic indexes below assume the
                    # exact RRD layout XAPI emits (childNodes[4] is the
                    # <database> element) — confirm against a live RRD.
                    rows = node.childNodes[4].childNodes
                    last_row = rows[rows.length - 1].childNodes
                    for j, value in enumerate(last_row):
                        diags[keys[j]] = value.firstChild.data
                    break
        return diags
    except expat.ExpatError as e:
        LOG.exception(_('Unable to parse rrd of %(vm_uuid)s') % locals())
        return {"Unable to retrieve diagnostics": e}
def fetch_bandwidth(session):
    """Return per-VM bandwidth counters via the 'bandwidth' XAPI plugin."""
    return session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth')
def compile_metrics(start_time, stop_time=None):
    """Compile bandwidth usage, cpu, and disk metrics for all VMs on
    this host.
    Note that some stats, like bandwidth, do not seem to be very
    accurate in some of the data from XenServer (mdragon). """
    start_time = int(start_time)
    # Pull the RRD updates feed since start_time and fold it into a
    # per-VM dict; raises CouldNotFetchMetrics when the host returns
    # nothing.
    xml = _get_rrd_updates(_get_rrd_server(), start_time)
    if xml:
        doc = minidom.parseString(xml)
        return _parse_rrd_update(doc, start_time, stop_time)
    raise exception.CouldNotFetchMetrics()
def _scan_sr(session, sr_ref=None):
"""Scans the SR specified by sr_ref."""
if sr_ref:
LOG.debug(_("Re-scanning SR %s"), sr_ref)
session.call_xenapi('SR.scan', sr_ref)
def scan_default_sr(session):
    """Looks for the system default SR and triggers a re-scan."""
    # _find_sr may return None, in which case _scan_sr is a no-op.
    _scan_sr(session, _find_sr(session))
def safe_find_sr(session):
    """Same as _find_sr except raises a NotFound exception if SR cannot be
    determined
    """
    sr_ref = _find_sr(session)
    if sr_ref is None:
        # No SR matched the configured sr_matching_filter.
        raise exception.StorageRepositoryNotFound()
    return sr_ref
def _find_sr(session):
    """Return the storage repository to hold VM images."""
    host = session.get_xenapi_host()
    try:
        # CONF.sr_matching_filter has the form "<criteria>:<pattern>".
        tokens = CONF.sr_matching_filter.split(':')
        filter_criteria = tokens[0]
        filter_pattern = tokens[1]
    except IndexError:
        # oops, flag is invalid
        LOG.warning(_("Flag sr_matching_filter '%s' does not respect "
                      "formatting convention"), CONF.sr_matching_filter)
        return None
    if filter_criteria == 'other-config':
        # Match SRs whose other-config carries key=value, but only accept
        # one that a PBD actually plugs into *this* host.
        key, value = filter_pattern.split('=', 1)
        for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
            if not (key in sr_rec['other_config'] and
                    sr_rec['other_config'][key] == value):
                continue
            for pbd_ref in sr_rec['PBDs']:
                pbd_rec = session.get_rec('PBD', pbd_ref)
                if pbd_rec and pbd_rec['host'] == host:
                    return sr_ref
    elif filter_criteria == 'default-sr' and filter_pattern == 'true':
        # Fall back to the pool-wide default SR.
        pool_ref = session.call_xenapi('pool.get_all')[0]
        return session.call_xenapi('pool.get_default_SR', pool_ref)
    # No SR found!
    LOG.warning(_("XenAPI is unable to find a Storage Repository to "
                  "install guest instances on. Please check your "
                  "configuration and/or configure the flag "
                  "'sr_matching_filter'"))
    return None
def _safe_find_iso_sr(session):
    """Same as _find_iso_sr except raises a NotFound exception if SR
    cannot be determined
    """
    sr_ref = _find_iso_sr(session)
    if sr_ref is None:
        # No host-local SR with content-type 'iso' was found.
        raise exception.NotFound(_('Cannot find SR of content-type ISO'))
    return sr_ref
def _find_iso_sr(session):
    """Return the storage repository to hold ISO images."""
    host = session.get_xenapi_host()
    # An SR qualifies when: content_type == 'iso', its other-config marks
    # it as the local ISO store, and a PBD plugs it into this host.
    for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
        LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
        if not sr_rec['content_type'] == 'iso':
            LOG.debug(_("ISO: not iso content"))
            continue
        if 'i18n-key' not in sr_rec['other_config']:
            LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
            continue
        if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
            LOG.debug(_("ISO: iso content_type, i18n-key value not "
                        "'local-storage-iso'"))
            continue
        LOG.debug(_("ISO: SR MATCHing our criteria"))
        for pbd_ref in sr_rec['PBDs']:
            LOG.debug(_("ISO: ISO, looking to see if it is host local"))
            pbd_rec = session.get_rec('PBD', pbd_ref)
            if not pbd_rec:
                LOG.debug(_("ISO: PBD %(pbd_ref)s disappeared") % locals())
                continue
            pbd_rec_host = pbd_rec['host']
            LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, "
                        "have %(host)s") % locals())
            if pbd_rec_host == host:
                LOG.debug(_("ISO: SR with local PBD"))
                return sr_ref
    return None
def _get_rrd_server():
    """Return server's scheme and address to use for retrieving RRD XMLs."""
    # Derive [scheme, host:port] from the configured XenAPI connection URL.
    xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
    return [xs_url.scheme, xs_url.netloc]
def _get_rrd(server, vm_uuid):
    """Return the VM RRD XML as a string."""
    try:
        # `server` is the [scheme, netloc] pair from _get_rrd_server();
        # note the XenAPI credentials are embedded in the URL itself.
        xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
            server[0],
            CONF.xenapi_connection_username,
            CONF.xenapi_connection_password,
            server[1],
            vm_uuid))
        return xml.read()
    except IOError:
        # Best effort: callers treat None as "no RRD available".
        LOG.exception(_('Unable to obtain RRD XML for VM %(vm_uuid)s with '
                        'server details: %(server)s.') % locals())
        return None
def _get_rrd_updates(server, start_time):
    """Return the RRD updates XML as a string."""
    try:
        # Same credentials-in-URL scheme as _get_rrd(); start= limits the
        # feed to samples newer than start_time (epoch seconds).
        xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
            server[0],
            CONF.xenapi_connection_username,
            CONF.xenapi_connection_password,
            server[1],
            start_time))
        return xml.read()
    except IOError:
        # Best effort: callers treat None as "no updates available".
        LOG.exception(_('Unable to obtain RRD XML updates with '
                        'server details: %(server)s.') % locals())
        return None
def _parse_rrd_meta(doc):
data = {}
meta = doc.getElementsByTagName('meta')[0]
for tag in ('start', 'end', 'step'):
data[tag] = int(meta.getElementsByTagName(tag)[0].firstChild.data)
legend = meta.getElementsByTagName('legend')[0]
data['legend'] = [child.firstChild.data for child in legend.childNodes]
return data
def _parse_rrd_data(doc):
dnode = doc.getElementsByTagName('data')[0]
return [dict(
time=int(child.getElementsByTagName('t')[0].firstChild.data),
values=[decimal.Decimal(valnode.firstChild.data)
for valnode in child.getElementsByTagName('v')])
for child in dnode.childNodes]
def _parse_rrd_update(doc, start, until=None):
    """Aggregate an RRD updates document into per-VM metric dicts.

    Legend entries look like 'datatype:objtype:uuid:name'; vif* counters
    are integrated over time, every other column is averaged.
    """
    meta = _parse_rrd_meta(doc)
    data = _parse_rrd_data(doc)
    totals = {}
    for col, label in enumerate(meta['legend']):
        _datatype, _objtype, uuid, name = label.split(':')
        metrics = totals.setdefault(uuid, {})
        if name.startswith('vif'):
            metrics[name] = _integrate_series(data, col, start, until)
        else:
            metrics[name] = _average_series(data, col, until)
    return totals
def _average_series(data, col, until=None):
vals = [row['values'][col] for row in data
if (not until or (row['time'] <= until)) and
row['values'][col].is_finite()]
if vals:
try:
return (sum(vals) / len(vals)).quantize(decimal.Decimal('1.0000'))
except decimal.InvalidOperation:
# (mdragon) Xenserver occasionally returns odd values in
# data that will throw an error on averaging (see bug 918490)
# These are hard to find, since, whatever those values are,
# Decimal seems to think they are a valid number, sortof.
# We *think* we've got the the cases covered, but just in
# case, log and return NaN, so we don't break reporting of
# other statistics.
LOG.error(_("Invalid statistics data from Xenserver: %s")
% str(vals))
return decimal.Decimal('NaN')
else:
return decimal.Decimal('0.0000')
def _integrate_series(data, col, start, until=None):
    """Integrate column `col` over time using a trapezoidal approximation.

    Rows arrive newest-first, hence reversed(); NaN samples are treated
    as zero.  Result is quantized to four decimal places.
    """
    total = decimal.Decimal('0.0000')
    prev_time = int(start)
    prev_val = None
    for row in reversed(data):
        if not until or (row['time'] <= until):
            time = row['time']
            val = row['values'][col]
            if val.is_nan():
                val = decimal.Decimal('0.0000')
            if prev_val is None:
                prev_val = val
            # Trapezoid area between the previous and current sample:
            # rectangle under min(prev, val) plus the triangle on top.
            if prev_val >= val:
                total += ((val * (time - prev_time)) +
                          (decimal.Decimal('0.5000') * (prev_val - val) *
                           (time - prev_time)))
            else:
                total += ((prev_val * (time - prev_time)) +
                          (decimal.Decimal('0.5000') * (val - prev_val) *
                           (time - prev_time)))
            prev_time = time
            prev_val = val
    return total.quantize(decimal.Decimal('1.0000'))
def _get_all_vdis_in_sr(session, sr_ref):
    """Yield (vdi_ref, vdi_rec) for every VDI in the SR, skipping any VDI
    whose record can no longer be fetched."""
    for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
        try:
            vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
            yield vdi_ref, vdi_rec
        except session.XenAPI.Failure:
            # VDI disappeared between listing and fetching its record.
            continue
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
    """Return opaqueRef for all the vdis which live on sr."""
    for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
        try:
            vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
            if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref):
                yield vdi_ref
        except session.XenAPI.Failure:
            # VBD or VDI vanished mid-iteration; skip it.
            continue
def _get_vhd_parent_uuid(session, vdi_ref):
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
if 'vhd-parent' not in vdi_rec['sm_config']:
return None
parent_uuid = vdi_rec['sm_config']['vhd-parent']
vdi_uuid = vdi_rec['uuid']
LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_uuid)s") % locals())
return parent_uuid
def _walk_vdi_chain(session, vdi_uuid):
    """Yield vdi_recs for each element in a VDI chain."""
    # Re-scan first so sm_config's vhd-parent pointers are up to date.
    scan_default_sr(session)
    while True:
        vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        yield vdi_rec
        # Follow the parent pointer until we hit the chain root.
        parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
        if not parent_uuid:
            break
        vdi_uuid = parent_uuid
def _child_vhds(session, sr_ref, vdi_uuid):
    """Return the immediate children of a given VHD.
    This is not recursive, only the immediate children are returned.
    """
    return set(rec['uuid']
               for ref, rec in _get_all_vdis_in_sr(session, sr_ref)
               if rec['uuid'] != vdi_uuid
               and _get_vhd_parent_uuid(session, ref) == vdi_uuid)
def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
                           original_parent_uuid):
    """Spin until the parent VHD is coalesced into its parent VHD

    Before coalesce:
        * original_parent_vhd
        * parent_vhd
          snapshot
    After coalesce:
        * parent_vhd
          snapshot

    Returns a (parent_uuid, base_uuid) pair once the chain has settled;
    raises NovaException after the configured number of attempts.
    """
    def _another_child_vhd():
        # True when some other VDI in the SR (outside this instance's
        # chain) also parents to original_parent_uuid; coalesce cannot
        # happen in that case.
        if not original_parent_uuid:
            return False
        # Search for any other vdi which parents to original parent and is not
        # in the active vm/instance vdi chain.
        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        parent_vdi_uuid = _get_vhd_parent_uuid(session, vdi_ref)
        for _ref, rec in _get_all_vdis_in_sr(session, sr_ref):
            if ((rec['uuid'] != vdi_uuid) and
                (rec['uuid'] != parent_vdi_uuid) and
                (rec['sm_config'].get('vhd-parent') == original_parent_uuid)):
                # Found another vhd which too parents to original parent.
                return True
        # Found no other vdi with the same parent.
        return False
    # Check if original parent has any other child. If so, coalesce will
    # not take place.
    if _another_child_vhd():
        parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
        parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
        base_uuid = _get_vhd_parent_uuid(session, parent_ref)
        return parent_uuid, base_uuid
    # NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
    # matches the underlying VHDs.
    _scan_sr(session, sr_ref)
    max_attempts = CONF.xenapi_vhd_coalesce_max_attempts
    for i in xrange(max_attempts):
        _scan_sr(session, sr_ref)
        parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
        if original_parent_uuid and (parent_uuid != original_parent_uuid):
            LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
                        " %(original_parent_uuid)s, waiting for coalesce..."),
                      locals(), instance=instance)
        else:
            # Coalesce finished: report the new (parent, base) pair.
            parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
            base_uuid = _get_vhd_parent_uuid(session, parent_ref)
            return parent_uuid, base_uuid
        greenthread.sleep(CONF.xenapi_vhd_coalesce_poll_interval)
    msg = (_("VHD coalesce attempts exceeded (%(max_attempts)d)"
             ", giving up...") % locals())
    raise exception.NovaException(msg)
def _remap_vbd_dev(dev):
    """Return the appropriate location for a plugged-in VBD device
    Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
    fixed in future versions:
        https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
    For now, we work around it by just doing a string replace.
    """
    # NOTE(sirp): This hack can go away when we pull support for Maverick
    should_remap = CONF.xenapi_remap_vbd_dev
    if not should_remap:
        return dev
    # e.g. 'xvda' -> 'sda' when remapping is enabled.
    old_prefix = 'xvd'
    new_prefix = CONF.xenapi_remap_vbd_dev_prefix
    remapped_dev = dev.replace(old_prefix, new_prefix)
    return remapped_dev
def _wait_for_device(dev):
    """Wait for device node to appear."""
    # Poll once a second, up to CONF.block_device_creation_timeout seconds,
    # then give up with a StorageError.
    for i in xrange(0, CONF.block_device_creation_timeout):
        dev_path = utils.make_dev_path(dev)
        if os.path.exists(dev_path):
            return
        time.sleep(1)
    raise volume_utils.StorageError(
        _('Timeout waiting for device %s to be created') % dev)
def cleanup_attached_vdis(session):
    """Unplug any instance VDIs left after an unclean restart.

    Walks this domU's own VBDs and disconnects any whose VDI is tagged
    with a nova_instance_uuid, which indicates a leftover attachment.
    """
    this_vm_ref = _get_this_vm_ref(session)
    vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
    for vbd_ref in vbd_refs:
        try:
            vbd_rec = session.call_xenapi('VBD.get_record', vbd_ref)
            vdi_rec = session.call_xenapi('VDI.get_record', vbd_rec['VDI'])
        # FIX: use the `except ... as` form (valid since Python 2.6) for
        # consistency with the rest of this module; the old comma form is
        # Python-2-only syntax.
        except session.XenAPI.Failure as e:
            # The VBD or VDI may already be gone; only ignore stale handles.
            if e.details[0] != 'HANDLE_INVALID':
                raise
            continue
        if 'nova_instance_uuid' in vdi_rec['other_config']:
            # Belongs to an instance and probably left over after an
            # unclean restart
            LOG.info(_('Disconnecting stale VDI %s from compute domU'),
                     vdi_rec['uuid'])
            unplug_vbd(session, vbd_ref)
            destroy_vbd(session, vbd_ref)
@contextlib.contextmanager
def vdi_attached_here(session, vdi_ref, read_only=False):
    """Context manager: plug vdi_ref into this domU as a VBD and yield the
    local device name; the VBD is unplugged and destroyed on exit."""
    this_vm_ref = _get_this_vm_ref(session)
    vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
                         read_only=read_only, bootable=False)
    try:
        LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
        session.call_xenapi("VBD.plug", vbd_ref)
        try:
            LOG.debug(_('Plugging VBD %s done.'), vbd_ref)
            orig_dev = session.call_xenapi("VBD.get_device", vbd_ref)
            LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals())
            # Work around distros that rename xvd? -> sd? (see
            # _remap_vbd_dev).
            dev = _remap_vbd_dev(orig_dev)
            if dev != orig_dev:
                LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
                            'remapping to %(dev)s') % locals())
            _wait_for_device(dev)
            yield dev
        finally:
            LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
            unplug_vbd(session, vbd_ref)
    finally:
        try:
            destroy_vbd(session, vbd_ref)
        except volume_utils.StorageError:
            # destroy_vbd() will log error
            pass
        LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref)
def _get_sys_hypervisor_uuid():
    """Return this domU's UUID as reported by /sys/hypervisor/uuid.

    Raises IOError when the kernel does not expose the file (callers
    fall back to xenstore).
    """
    # FIX: use open() instead of the deprecated file() builtin (removed
    # in Python 3); the with-block still closes the handle.
    with open('/sys/hypervisor/uuid') as f:
        return f.readline().strip()
def get_this_vm_uuid():
    """Return the UUID of the VM (domU) this process is running inside."""
    try:
        return _get_sys_hypervisor_uuid()
    except IOError:
        # Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25)
        # cannot read from uuid after a reboot. Fall back to trying xenstore.
        # See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182
        domid, _ = utils.execute('xenstore-read', 'domid', run_as_root=True)
        vm_key, _ = utils.execute('xenstore-read',
                                  '/local/domain/%s/vm' % domid.strip(),
                                  run_as_root=True)
        # The xenstore value looks like '/vm/<uuid>'; [4:] strips '/vm/'.
        return vm_key.strip()[4:]
def _get_this_vm_ref(session):
    """Return the XenAPI opaque ref of the VM this service runs in."""
    return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid())
def _is_vdi_pv(dev):
    """Run pygrub against the device to decide whether it holds a PV guest.

    Returns True when pygrub reports a Xen kernel on the disk; False when
    no kernel is found or pygrub is unavailable (HVM is then assumed).
    """
    LOG.debug(_("Running pygrub against %s"), dev)
    dev_path = utils.make_dev_path(dev)
    try:
        out, err = utils.execute('pygrub', '-qn', dev_path, run_as_root=True)
        # FIX: `out` is the whole stdout string; iterating it directly
        # yields single characters, so the multi-character kernel regex
        # below could never match.  Split into lines first.
        for line in out.splitlines():
            # try to find kernel string
            m = re.search('(?<=kernel:)/.*(?:>)', line)
            if m and m.group(0).find('xen') != -1:
                LOG.debug(_("Found Xen kernel %s") % m.group(0))
                return True
        LOG.debug(_("No Xen kernel found. Booting HVM."))
    except exception.ProcessExecutionError:
        LOG.exception(_("Error while executing pygrub! Please, ensure the "
                        "binary is installed correctly, and available in your "
                        "PATH; on some Linux distros, pygrub may be installed "
                        "in /usr/lib/xen-X.Y/bin/pygrub. Attempting to boot "
                        "in HVM mode."))
    return False
def _get_partitions(dev):
    """Return partition information (num, size, type) for a device.

    Each entry is a (num, start, size, ptype) tuple; start/size are ints
    in sectors, while `num` remains the string parted printed.
    """
    dev_path = utils.make_dev_path(dev)
    out, _err = utils.execute('parted', '--script', '--machine',
                             dev_path, 'unit s', 'print',
                             run_as_root=True)
    lines = [line for line in out.split('\n') if line]
    partitions = []
    LOG.debug(_("Partitions:"))
    # The first two lines of parted's machine output are headers; each
    # remaining line is colon-separated partition data.
    for line in lines[2:]:
        num, start, end, size, ptype = line.split(':')[:5]
        start = int(start.rstrip('s'))
        end = int(end.rstrip('s'))
        size = int(size.rstrip('s'))
        LOG.debug(_("  %(num)s: %(ptype)s %(size)d sectors") % locals())
        partitions.append((num, start, size, ptype))
    return partitions
def _stream_disk(image_service_func, image_type, virtual_size, dev):
    """Stream an image onto dev via image_service_func, laying down an
    MBR partition table first for DISK-type images."""
    offset = 0
    if image_type == ImageType.DISK:
        # DISK images are bare filesystems: leave room for the MBR and
        # write the partition table ourselves.
        offset = MBR_SIZE_BYTES
        _write_partition(virtual_size, dev)
    dev_path = utils.make_dev_path(dev)
    with utils.temporary_chown(dev_path):
        with open(dev_path, 'wb') as f:
            f.seek(offset)
            image_service_func(f)
def _write_partition(virtual_size, dev):
    """Write an MSDOS partition table with one primary partition covering
    virtual_size bytes, starting just after the MBR.
    """
    dev_path = utils.make_dev_path(dev)
    primary_first = MBR_SIZE_SECTORS
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
    LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
                ' to %(dev_path)s...'), locals())
    # FIX: call utils.execute directly; the previous local `execute`
    # wrapper only forwarded its arguments and added nothing.
    utils.execute('parted', '--script', dev_path, 'mklabel', 'msdos',
                  run_as_root=True)
    utils.execute('parted', '--script', dev_path, 'mkpart', 'primary',
                  '%ds' % primary_first,
                  '%ds' % primary_last,
                  run_as_root=True)
    LOG.debug(_('Writing partition table %s done.'), dev_path)
def _resize_part_and_fs(dev, start, old_sectors, new_sectors):
    """Resize partition and filesystem.
    This assumes we are dealing with a single primary partition and using
    ext3 or ext4.
    """
    size = new_sectors - start
    end = new_sectors - 1
    dev_path = utils.make_dev_path(dev)
    partition_path = utils.make_dev_path(dev, partition=1)
    # Replay journal if FS wasn't cleanly unmounted
    # Exit Code 1 = File system errors corrected
    #           2 = File system errors corrected, system needs a reboot
    utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True,
                  check_exit_code=[0, 1, 2])
    # Remove ext3 journal (making it ext2)
    utils.execute('tune2fs', '-O ^has_journal', partition_path,
                  run_as_root=True)
    if new_sectors < old_sectors:
        # Resizing down, resize filesystem before partition resize
        utils.execute('resize2fs', partition_path, '%ds' % size,
                      run_as_root=True)
    # Recreate the single primary partition at the new size.
    utils.execute('parted', '--script', dev_path, 'rm', '1',
                  run_as_root=True)
    utils.execute('parted', '--script', dev_path, 'mkpart',
                  'primary',
                  '%ds' % start,
                  '%ds' % end,
                  run_as_root=True)
    if new_sectors > old_sectors:
        # Resizing up, resize filesystem after partition resize
        utils.execute('resize2fs', partition_path, run_as_root=True)
    # Add back journal
    utils.execute('tune2fs', '-j', partition_path, run_as_root=True)
def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
    """Copy data, skipping long runs of zeros to create a sparse file."""
    start_time = time.time()
    EMPTY_BLOCK = '\0' * block_size
    bytes_read = 0
    skipped_bytes = 0
    left = virtual_size
    LOG.debug(_("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
                "virtual_size=%(virtual_size)d block_size=%(block_size)d"),
              locals())
    # NOTE(sirp): we need read/write access to the devices; since we don't have
    # the luxury of shelling out to a sudo'd command, we temporarily take
    # ownership of the devices.
    with utils.temporary_chown(src_path):
        with utils.temporary_chown(dst_path):
            with open(src_path, "r") as src:
                with open(dst_path, "w") as dst:
                    data = src.read(min(block_size, left))
                    while data:
                        if data == EMPTY_BLOCK:
                            # Seek over zero blocks instead of writing
                            # them, leaving holes in the destination.
                            dst.seek(block_size, os.SEEK_CUR)
                            left -= block_size
                            bytes_read += block_size
                            skipped_bytes += block_size
                        else:
                            dst.write(data)
                            data_len = len(data)
                            left -= data_len
                            bytes_read += data_len
                        if left <= 0:
                            break
                        data = src.read(min(block_size, left))
                        greenthread.sleep(0)
    duration = time.time() - start_time
    # FIX: guard against ZeroDivisionError when nothing was read (for
    # example a zero virtual_size).
    if bytes_read:
        compression_pct = float(skipped_bytes) / bytes_read * 100
    else:
        compression_pct = 0.0
    LOG.debug(_("Finished sparse_copy in %(duration).2f secs, "
                "%(compression_pct).2f%% reduction in size"), locals())
def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
    """Copy one partition's data from the src VDI to the dst VDI, writing
    a fresh partition table on the destination first."""
    # Part of disk taken up by MBR
    virtual_size -= MBR_SIZE_BYTES
    with vdi_attached_here(session, src_ref, read_only=True) as src:
        src_path = utils.make_dev_path(src, partition=partition)
        with vdi_attached_here(session, dst_ref, read_only=False) as dst:
            dst_path = utils.make_dev_path(dst, partition=partition)
            _write_partition(virtual_size, dst)
            if CONF.xenapi_sparse_copy:
                # Sparse copy skips zero runs; dd copies every block.
                _sparse_copy(src_path, dst_path, virtual_size)
            else:
                num_blocks = virtual_size / SECTOR_SIZE
                utils.execute('dd',
                              'if=%s' % src_path,
                              'of=%s' % dst_path,
                              'count=%d' % num_blocks,
                              run_as_root=True)
def _mount_filesystem(dev_path, dir):
    """mounts the device specified by dev_path in dir.

    Returns mount's stderr text (typically empty on success) or the
    stringified execution error; callers treat a truthy value as failure.
    """
    try:
        _out, err = utils.execute('mount',
                                 '-t', 'ext2,ext3,ext4,reiserfs',
                                 dev_path, dir, run_as_root=True)
    except exception.ProcessExecutionError as e:
        err = str(e)
    return err
def _mounted_processing(device, key, net, metadata):
    """Callback which runs with the image VDI attached."""
    # NB: Partition 1 hardcoded
    dev_path = utils.make_dev_path(device, partition=1)
    with utils.tempdir() as tmpdir:
        # Mount only Linux filesystems, to avoid disturbing NTFS images
        err = _mount_filesystem(dev_path, tmpdir)
        if not err:
            try:
                # This try block ensures that the umount occurs
                # Skip direct file manipulation when a guest agent is
                # present; the agent handles injection itself.
                if not agent.find_guest_agent(tmpdir):
                    vfs = vfsimpl.VFSLocalFS(imgfile=None,
                                             imgfmt=None,
                                             imgdir=tmpdir)
                    LOG.info(_('Manipulating interface files directly'))
                    # for xenapi, we don't 'inject' admin_password here,
                    # it's handled at instance startup time, nor do we
                    # support injecting arbitrary files here.
                    disk.inject_data_into_fs(vfs,
                                             key, net, metadata, None, None)
            finally:
                utils.execute('umount', dev_path, run_as_root=True)
        else:
            LOG.info(_('Failed to mount filesystem (expected for '
                       'non-linux instances): %s') % err)
def _prepare_injectables(inst, network_info):
    """
    prepares the ssh key and the network configuration file to be
    injected into the disk image

    Returns a (key, net, metadata) tuple: the instance's ssh key data,
    the rendered network template (None when nothing is injectable), and
    the instance metadata dict.
    """
    #do the import here - Cheetah.Template will be loaded
    #only if injection is performed
    from Cheetah import Template as t
    template = t.Template
    template_data = open(CONF.injected_network_template).read()
    metadata = inst['metadata']
    key = str(inst['key_data'])
    net = None
    if network_info:
        ifc_num = -1
        interfaces_info = []
        for vif in network_info:
            ifc_num += 1
            try:
                if not vif['network'].get_meta('injected'):
                    # network is not specified injected
                    continue
            except KeyError:
                # vif network is None
                continue
            # NOTE(tr3buchet): using all subnets in case dns is stored in a
            #                  subnet that isn't chosen as first v4 or v6
            #                  subnet in the case where there is more than one
            # dns = list of address of each dns entry from each vif subnet
            dns = [ip['address'] for subnet in vif['network']['subnets']
                   for ip in subnet['dns']]
            dns = ' '.join(dns).strip()
            interface_info = {'name': 'eth%d' % ifc_num,
                              'address': '',
                              'netmask': '',
                              'gateway': '',
                              'broadcast': '',
                              'dns': dns or '',
                              'address_v6': '',
                              'netmask_v6': '',
                              'gateway_v6': '',
                              'use_ipv6': CONF.use_ipv6}
            # NOTE(tr3buchet): the original code used the old network_info
            #                  which only supported a single ipv4 subnet
            #                  (and optionally, a single ipv6 subnet).
            #                  I modified it to use the new network info model,
            #                  which adds support for multiple v4 or v6
            #                  subnets. I chose to ignore any additional
            #                  subnets, just as the original code ignored
            #                  additional IP information
            # populate v4 info if v4 subnet and ip exist
            try:
                # grab the first v4 subnet (or it raises)
                subnet = [s for s in vif['network']['subnets']
                          if s['version'] == 4][0]
                # get the subnet's first ip (or it raises)
                ip = subnet['ips'][0]
                # populate interface_info
                subnet_netaddr = subnet.as_netaddr()
                interface_info['address'] = ip['address']
                interface_info['netmask'] = subnet_netaddr.netmask
                interface_info['gateway'] = subnet['gateway']['address']
                interface_info['broadcast'] = subnet_netaddr.broadcast
            except IndexError:
                # there isn't a v4 subnet or there are no ips
                pass
            # populate v6 info if v6 subnet and ip exist
            try:
                # grab the first v6 subnet (or it raises)
                subnet = [s for s in vif['network']['subnets']
                          if s['version'] == 6][0]
                # get the subnet's first ip (or it raises)
                ip = subnet['ips'][0]
                # populate interface_info
                interface_info['address_v6'] = ip['address']
                interface_info['netmask_v6'] = subnet.as_netaddr().netmask
                interface_info['gateway_v6'] = subnet['gateway']['address']
            except IndexError:
                # there isn't a v6 subnet or there are no ips
                pass
            interfaces_info.append(interface_info)
        if interfaces_info:
            net = str(template(template_data,
                               searchList=[{'interfaces': interfaces_info,
                                            'use_ipv6': CONF.use_ipv6}]))
    return key, net, metadata
def ensure_correct_host(session):
    """Ensure we're connected to the host we're running on. This is the
    required configuration for anything that uses vdi_attached_here."""
    this_vm_uuid = get_this_vm_uuid()
    try:
        session.call_xenapi('VM.get_by_uuid', this_vm_uuid)
    except session.XenAPI.Failure as exc:
        # UUID_INVALID means the host we reached doesn't know this VM,
        # i.e. the session points at the wrong host.
        if exc.details[0] != 'UUID_INVALID':
            raise
        raise Exception(_('This domU must be running on the host '
                          'specified by xenapi_connection_url'))
def move_disks(session, instance, disk_info):
    """Move and possibly link VHDs via the XAPI plugin."""
    imported_vhds = session.call_plugin_serialized(
            'migration', 'move_vhds_into_sr', instance_uuid=instance['uuid'],
            sr_path=get_sr_path(session), uuid_stack=_make_uuid_stack())
    # Now we rescan the SR so we find the VHDs
    scan_default_sr(session)
    # Set name-label so we can find if we need to clean up a failed
    # migration
    root_uuid = imported_vhds['root']['uuid']
    set_vdi_name(session, root_uuid, instance['name'], 'root')
    root_vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
    # Return both the uuid and the resolved ref of the new root disk.
    return {'uuid': root_uuid, 'ref': root_vdi_ref}
def vm_ref_or_raise(session, instance_name):
    """Return the VM ref for instance_name, raising InstanceNotFound when
    no such VM exists.
    """
    vm_ref = lookup(session, instance_name)
    if vm_ref is not None:
        return vm_ref
    raise exception.InstanceNotFound(instance_id=instance_name)
|
afandria/sky_engine | refs/heads/master | sky/tools/webkitpy/test/skip.py | 174 | # Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
_log = logging.getLogger(__name__)
def skip_if(klass, condition, message=None, logger=None):
    """Makes all test_* methods in a given class no-ops if the given condition
    is True. Backported from Python 3.1+'s unittest.skipIf decorator.

    FIX: the docstring previously said the methods were skipped when the
    condition is False, which is the opposite of what the code (and
    unittest.skipIf) does.
    """
    if not logger:
        logger = _log
    if not condition:
        return klass
    for name in dir(klass):
        attr = getattr(klass, name)
        if not callable(attr):
            continue
        if not name.startswith('test_'):
            continue
        setattr(klass, name, _skipped_method(attr, message, logger))
    # Reset so the skip notice is printed once per class.
    klass._printed_skipped_message = False
    return klass
def _skipped_method(method, message, logger):
    """Return a replacement for `method` that logs a skip notice instead
    of running the test.

    NOTE: relies on Python 2 unbound-method attributes (`im_class`); the
    notice is emitted only once per class, gated by the
    _printed_skipped_message flag that skip_if() installs.
    """
    def _skip(*args):
        if method.im_class._printed_skipped_message:
            return
        method.im_class._printed_skipped_message = True
        logger.info('Skipping %s.%s: %s' % (method.__module__, method.im_class.__name__, message))
    return _skip
|
ojii/sandlib | refs/heads/master | lib/lib-python/2.7/curses/ascii.py | 396 | """Constants and membership tests for ASCII characters"""
# 7-bit ASCII control-code points. The trailing comment on each line
# shows the traditional Ctrl-key equivalent ('^X' notation).
NUL = 0x00  # ^@
SOH = 0x01  # ^A
STX = 0x02  # ^B
ETX = 0x03  # ^C
EOT = 0x04  # ^D
ENQ = 0x05  # ^E
ACK = 0x06  # ^F
BEL = 0x07  # ^G
BS = 0x08  # ^H
TAB = 0x09  # ^I
HT = 0x09  # ^I
LF = 0x0a  # ^J
NL = 0x0a  # ^J
VT = 0x0b  # ^K
FF = 0x0c  # ^L
CR = 0x0d  # ^M
SO = 0x0e  # ^N
SI = 0x0f  # ^O
DLE = 0x10  # ^P
DC1 = 0x11  # ^Q
DC2 = 0x12  # ^R
DC3 = 0x13  # ^S
DC4 = 0x14  # ^T
NAK = 0x15  # ^U
SYN = 0x16  # ^V
ETB = 0x17  # ^W
CAN = 0x18  # ^X
EM = 0x19  # ^Y
SUB = 0x1a  # ^Z
ESC = 0x1b  # ^[
FS = 0x1c  # ^\
GS = 0x1d  # ^]
RS = 0x1e  # ^^
US = 0x1f  # ^_
SP = 0x20  # space
DEL = 0x7f  # delete
# Conventional names for codes 0x00-0x20, indexed by code point.
controlnames = [
"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
"BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI",
"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
"CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US",
"SP"
]
def _ctoi(c):
if type(c) == type(""):
return ord(c)
else:
return c
def isalnum(c): return isalpha(c) or isdigit(c)
def isalpha(c): return isupper(c) or islower(c)
def isascii(c): return _ctoi(c) <= 127 # ?
def isblank(c): return _ctoi(c) in (8,32)
def iscntrl(c): return _ctoi(c) <= 31
def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
def ispunct(c): return _ctoi(c) != 32 and not isalnum(c)
def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
def isxdigit(c): return isdigit(c) or \
(_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102)
def isctrl(c): return _ctoi(c) < 32
def ismeta(c): return _ctoi(c) > 127
def ascii(c):
    """Mask off the meta bit, yielding the 7-bit ASCII equivalent.

    A one-character string maps to a string; an int maps to an int.
    """
    masked = _ctoi(c) & 0x7f
    return chr(masked) if type(c) == type("") else masked
def ctrl(c):
    """Map to the corresponding control character (keep low 5 bits only).

    A one-character string maps to a string; an int maps to an int.
    """
    masked = _ctoi(c) & 0x1f
    return chr(masked) if type(c) == type("") else masked
def alt(c):
    """Set the meta (8th) bit on the character.

    A one-character string maps to a string; an int maps to an int.
    """
    meta_code = _ctoi(c) | 0x80
    return chr(meta_code) if type(c) == type("") else meta_code
def unctrl(c):
    """Return a printable representation of the character: '^X' for a
    control character, '^?' for DEL, with a leading '!' when the meta
    bit is set.
    """
    bits = _ctoi(c)
    low = bits & 0x7f
    if bits == 0x7f:
        rep = "^?"
    elif isprint(low):
        rep = chr(low)
    else:
        rep = "^" + chr((low | 0x20) + 0x20)
    prefix = "!" if bits & 0x80 else ""
    return prefix + rep
|
yongshengwang/hue | refs/heads/master | build/env/lib/python2.7/site-packages/ipython-0.10-py2.7.egg/IPython/tests/test_magic.py | 7 | """Tests for various magic functions.
Needs to be run by nose (to make ipython session available).
"""
import os
import sys
import tempfile
import types
import nose.tools as nt
from IPython.platutils import find_cmd, get_long_path_name
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
#-----------------------------------------------------------------------------
# Test functions begin
def test_rehashx():
    """%rehashx must rebuild the alias table and refill 'syscmdlist'."""
    # clear up everything
    _ip.IP.alias_table.clear()
    del _ip.db['syscmdlist']
    _ip.magic('rehashx')
    # Practically ALL ipython development systems will have more than 10 aliases
    yield (nt.assert_true, len(_ip.IP.alias_table) > 10)
    for key, val in _ip.IP.alias_table.items():
        # we must strip dots from alias names
        nt.assert_true('.' not in key)
    # rehashx must fill up syscmdlist
    scoms = _ip.db['syscmdlist']
    yield (nt.assert_true, len(scoms) > 10)
def doctest_hist_f():
"""Test %hist -f with temporary filename.
In [9]: import tempfile
In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
In [11]: %hist -n -f $tfile 3
"""
def doctest_hist_r():
"""Test %hist -r
XXX - This test is not recording the output correctly. Not sure why...
In [20]: 'hist' in _ip.IP.lsmagic()
Out[20]: True
In [6]: x=1
In [7]: %hist -n -r 2
x=1 # random
hist -n -r 2 # random
"""
# This test is known to fail on win32.
# See ticket https://bugs.launchpad.net/bugs/366334
def test_obj_del():
"""Test that object's __del__ methods are called on exit."""
test_dir = os.path.dirname(__file__)
del_file = os.path.join(test_dir,'obj_del.py')
ipython_cmd = find_cmd('ipython')
out = _ip.IP.getoutput('%s %s' % (ipython_cmd, del_file))
nt.assert_equals(out,'obj_del.py: object A deleted')
def test_shist():
# Simple tests of ShadowHist class - test generator.
import os, shutil, tempfile
from IPython.Extensions import pickleshare
from IPython.history import ShadowHist
tfile = tempfile.mktemp('','tmp-ipython-')
db = pickleshare.PickleShareDB(tfile)
s = ShadowHist(db)
s.add('hello')
s.add('world')
s.add('hello')
s.add('hello')
s.add('karhu')
yield nt.assert_equals,s.all(),[(1, 'hello'), (2, 'world'), (3, 'karhu')]
yield nt.assert_equal,s.get(2),'world'
shutil.rmtree(tfile)
@dec.skipif_not_numpy
def test_numpy_clear_array_undec():
from IPython.Extensions import clearcmd
_ip.ex('import numpy as np')
_ip.ex('a = np.empty(2)')
yield (nt.assert_true, 'a' in _ip.user_ns)
_ip.magic('clear array')
yield (nt.assert_false, 'a' in _ip.user_ns)
@dec.skip()
def test_fail_dec(*a,**k):
yield nt.assert_true, False
@dec.skip('This one shouldn not run')
def test_fail_dec2(*a,**k):
yield nt.assert_true, False
@dec.skipknownfailure
def test_fail_dec3(*a,**k):
yield nt.assert_true, False
def doctest_refbug():
"""Very nasty problem with references held by multiple runs of a script.
See: https://bugs.launchpad.net/ipython/+bug/269966
In [1]: _ip.IP.clear_main_mod_cache()
In [2]: run refbug
In [3]: call_f()
lowercased: hello
In [4]: run refbug
In [5]: call_f()
lowercased: hello
lowercased: hello
"""
#-----------------------------------------------------------------------------
# Tests for %run
#-----------------------------------------------------------------------------
# %run is critical enough that it's a good idea to have a solid collection of
# tests for it, some as doctests and some as normal tests.
def doctest_run_ns():
"""Classes declared %run scripts must be instantiable afterwards.
In [11]: run tclass foo
In [12]: isinstance(f(),foo)
Out[12]: True
"""
def doctest_run_ns2():
"""Classes declared %run scripts must be instantiable afterwards.
In [4]: run tclass C-first_pass
In [5]: run tclass C-second_pass
tclass.py: deleting object: C-first_pass
"""
def doctest_run_builtins():
"""Check that %run doesn't damage __builtins__ via a doctest.
This is similar to the test_run_builtins, but I want *both* forms of the
test to catch any possible glitches in our testing machinery, since that
modifies %run somewhat. So for this, we have both a normal test (below)
and a doctest (this one).
In [1]: import tempfile
In [2]: bid1 = id(__builtins__)
In [3]: fname = tempfile.mkstemp()[1]
In [3]: f = open(fname,'w')
In [4]: f.write('pass\\n')
In [5]: f.flush()
In [6]: print type(__builtins__)
<type 'module'>
In [7]: %run "$fname"
In [7]: f.close()
In [8]: bid2 = id(__builtins__)
In [9]: print type(__builtins__)
<type 'module'>
In [10]: bid1 == bid2
Out[10]: True
In [12]: try:
....: os.unlink(fname)
....: except:
....: pass
....:
"""
# For some tests, it will be handy to organize them in a class with a common
# setup that makes a temp file
class TestMagicRun(object):
    """%run tests sharing a temp-file fixture created in setup()."""
    def setup(self):
        """Make a valid python temp file."""
        fname = tempfile.mkstemp()[1]
        f = open(fname,'w')
        f.write('pass\n')
        f.flush()
        # Keep both the open handle and the name; teardown() needs them.
        self.tmpfile = f
        self.fname = fname
    def run_tmpfile(self):
        """Run the fixture file through the %run magic."""
        # This fails on Windows if self.tmpfile.name has spaces or "~" in it.
        # See below and ticket https://bugs.launchpad.net/bugs/366353
        _ip.magic('run "%s"' % self.fname)
    def test_builtins_id(self):
        """Check that %run doesn't damage __builtins__ """
        # Test that the id of __builtins__ is not modified by %run
        bid1 = id(_ip.user_ns['__builtins__'])
        self.run_tmpfile()
        bid2 = id(_ip.user_ns['__builtins__'])
        tt.assert_equals(bid1, bid2)
    def test_builtins_type(self):
        """Check that the type of __builtins__ doesn't change with %run.

        However, the above could pass if __builtins__ was already modified to
        be a dict (it should be a module) by a previous use of %run. So we
        also check explicitly that it really is a module:
        """
        self.run_tmpfile()
        tt.assert_equals(type(_ip.user_ns['__builtins__']),type(sys))
    def test_prompts(self):
        """Test that prompts correctly generate after %run"""
        self.run_tmpfile()
        p2 = str(_ip.IP.outputcache.prompt2).strip()
        nt.assert_equals(p2[:3], '...')
    def teardown(self):
        """Close and remove the fixture file."""
        self.tmpfile.close()
        try:
            os.unlink(self.fname)
        except:
            # On Windows, even though we close the file, we still can't delete
            # it. I have no clue why
            pass
# Multiple tests for clipboard pasting
def test_paste():
def paste(txt):
hooks.clipboard_get = lambda : txt
_ip.magic('paste')
# Inject fake clipboard hook but save original so we can restore it later
hooks = _ip.IP.hooks
user_ns = _ip.user_ns
original_clip = hooks.clipboard_get
try:
# This try/except with an emtpy except clause is here only because
# try/yield/finally is invalid syntax in Python 2.4. This will be
# removed when we drop 2.4-compatibility, and the emtpy except below
# will be changed to a finally.
# Run tests with fake clipboard function
user_ns.pop('x', None)
paste('x=1')
yield (nt.assert_equal, user_ns['x'], 1)
user_ns.pop('x', None)
paste('>>> x=2')
yield (nt.assert_equal, user_ns['x'], 2)
paste("""
>>> x = [1,2,3]
>>> y = []
>>> for i in x:
... y.append(i**2)
...
""")
yield (nt.assert_equal, user_ns['x'], [1,2,3])
yield (nt.assert_equal, user_ns['y'], [1,4,9])
except:
pass
# This should be in a finally clause, instead of the bare except above.
# Restore original hook
hooks.clipboard_get = original_clip
|
hujiajie/chromium-crosswalk | refs/heads/master | net/data/verify_certificate_chain_unittest/generate-unknown-root.py | 16 | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 1 intermediary, but the root is not in trust store.
Verification is expected to fail because the final intermediary (Intermediary)
does not chain to a known root."""
import common
# Self-signed root certificate, which is NOT added to the trust store.
root = common.create_self_signed_root_certificate('Root')
# Intermediary certificate.
intermediary = common.create_intermediary_certificate('Intermediary', root)
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediary)
chain = [target, intermediary]
trusted = [] # Note that this lacks |root|
time = common.DEFAULT_TIME
verify_result = False
common.write_test_file(__doc__, chain, trusted, time, verify_result)
|
codeofdusk/ProjectMagenta | refs/heads/master | doc/generator.py | 1 | # -*- coding: utf-8 -*-
import markdown
import os
from codecs import open as _open
import languageHandler
languageHandler.setLanguage("en")
import strings
# the list of supported language codes of TW Blue
languages = ["en", "es", "fr", "de", "it"]
#"eu", "ar", "ca", "es", "fi", "fr", "gl", "hu", "it", "pl", "pt", "ru", "tr"]
def generate_document(language):
    """Render the manual for one language code into <language>/manual.html."""
    # Python 2 reload(): re-import so the translated strings for the newly
    # selected language are picked up.
    reload(languageHandler)
    languageHandler.setLanguage(language)
    reload(strings)
    # strings.documentation[0] is the page title; the rest is markdown body.
    markdown_file = markdown.markdown("\n".join(strings.documentation[1:]), extensions=["markdown.extensions.toc"])
    first_html_block = """<!doctype html>
<html lang="%s">
<head>
<title>%s</title>
<meta charset="utf-8">
</head>
<body>
<header><h1>%s</h1></header>
""" % (language, strings.documentation[0], strings.documentation[0])
    first_html_block = first_html_block+ markdown_file
    first_html_block = first_html_block + "\n</body>\n</html>"
    if not os.path.exists(language):
        os.mkdir(language)
    # Write UTF-8 explicitly via codecs.open (aliased as _open).
    mdfile = _open("%s/manual.html" % language, "w", encoding="utf-8")
    mdfile.write(first_html_block)
    mdfile.close()
def create_documentation():
    """Generate the HTML manual for every code in `languages`."""
    print("Creating documentation in the supported languages...\n")
    for i in languages:
        print("Creating documentation for: %s" % (i,))
        generate_document(i)
    print("Done")
create_documentation() |
sholtebeck/knarflog | refs/heads/master | lib/flask/logging.py | 3 | # -*- coding: utf-8 -*-
"""
flask.logging
~~~~~~~~~~~~~
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
from __future__ import absolute_import
import logging
import sys
import warnings
from werkzeug.local import LocalProxy
from .globals import request
@LocalProxy
def wsgi_errors_stream():
    """Find the most appropriate error stream for the application. If a request
    is active, log to ``wsgi.errors``, otherwise use ``sys.stderr``.

    If you configure your own :class:`logging.StreamHandler`, you may want to
    use this for the stream. If you are using file or dict configuration and
    can't import this directly, you can refer to it as
    ``ext://flask.logging.wsgi_errors_stream``.
    """
    # Re-evaluated on every attribute access through the LocalProxy wrapper,
    # so the stream tracks whatever request context is current.
    return request.environ["wsgi.errors"] if request else sys.stderr
def has_level_handler(logger):
    """Check if there is a handler in the logging chain that will handle the
    given logger's :meth:`effective level <~logging.Logger.getEffectiveLevel>`.
    """
    level = logger.getEffectiveLevel()
    node = logger

    while node is not None:
        for handler in node.handlers:
            if handler.level <= level:
                return True

        if not node.propagate:
            # Propagation stops here; ancestors can never see the record.
            return False

        node = node.parent

    return False
#: Log messages to :func:`~flask.logging.wsgi_errors_stream` with the format
#: ``[%(asctime)s] %(levelname)s in %(module)s: %(message)s``.
default_handler = logging.StreamHandler(wsgi_errors_stream)
default_handler.setFormatter(
logging.Formatter("[%(asctime)s] %(levelname)s in %(module)s: %(message)s")
)
def _has_config(logger):
"""Decide if a logger has direct configuration applied by checking
its properties against the defaults.
:param logger: The :class:`~logging.Logger` to inspect.
"""
return (
logger.level != logging.NOTSET
or logger.handlers
or logger.filters
or not logger.propagate
)
def create_logger(app):
    """Get the Flask app's logger and configure it if needed.

    The logger name will be the same as
    :attr:`app.import_name <flask.Flask.name>`.

    When :attr:`~flask.Flask.debug` is enabled, set the logger level to
    :data:`logging.DEBUG` if it is not set.

    If there is no handler for the logger's effective level, add a
    :class:`~logging.StreamHandler` for
    :func:`~flask.logging.wsgi_errors_stream` with a basic format.
    """
    logger = logging.getLogger(app.name)
    # 1.1.0 changes name of logger, warn if config is detected for old
    # name and not new name
    for old_name in ("flask.app", "flask"):
        old_logger = logging.getLogger(old_name)
        if _has_config(old_logger) and not _has_config(logger):
            warnings.warn(
                "'app.logger' is named '{name}' for this application,"
                " but configuration was found for '{old_name}', which"
                " no longer has an effect. The logging configuration"
                " should be moved to '{name}'.".format(name=app.name, old_name=old_name)
            )
            break
    # Only raise verbosity when the user hasn't set an explicit level.
    if app.debug and not logger.level:
        logger.setLevel(logging.DEBUG)
    if not has_level_handler(logger):
        logger.addHandler(default_handler)
    return logger
|
sug4rok/Servus | refs/heads/master | Servus/plugins/system_ip_online/__init__.py | 1054 | # coding=utf-8
|
nojhan/weboob-devel | refs/heads/master | modules/kiwibank/module.py | 4 | # -*- coding: utf-8 -*-
# Copyright(C) 2015 Cédric Félizard
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.bank import CapBank, AccountNotFound
from weboob.capabilities.base import find_object
from weboob.tools.backend import Module, BackendConfig
from weboob.tools.value import ValueBackendPassword
from .browser import Kiwibank
__all__ = ['KiwibankModule']
class KiwibankModule(Module, CapBank):
    """Weboob banking backend for Kiwibank (New Zealand)."""
    NAME = 'kiwibank'
    MAINTAINER = u'Cédric Félizard'
    EMAIL = 'cedric@felizard.fr'
    VERSION = '1.0.0'
    LICENSE = 'AGPLv3+'
    DESCRIPTION = u'Kiwibank'
    # Credentials requested from the user; 'username' is shown in clear.
    CONFIG = BackendConfig(
        ValueBackendPassword('username', label='Username', masked=False),
        ValueBackendPassword('password', label='Password'),
    )
    BROWSER = Kiwibank
    def create_default_browser(self):
        # Instantiate the Kiwibank browser with the configured credentials.
        return self.create_browser(self.config['username'].get(), self.config['password'].get())
    def iter_accounts(self):
        # Delegate to the browser; yields the user's accounts.
        return self.browser.get_accounts()
    def get_account(self, _id):
        # find_object raises AccountNotFound when no account matches _id.
        return find_object(self.browser.get_accounts(), id=_id, error=AccountNotFound)
    def iter_history(self, account):
        # Generator over the account's transactions.
        for transaction in self.browser.get_history(account):
            yield transaction
mmatyas/skia | refs/heads/master | bench/tile_analyze.py | 198 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file.
""" Analyze per-tile and viewport bench data, and output visualized results.
"""
__author__ = 'bensong@google.com (Ben Chen)'
import bench_util
import boto
import math
import optparse
import os
import re
import shutil
from oauth2_plugin import oauth2_plugin
# The default platform to analyze. Used when OPTION_PLATFORM flag is not set.
DEFAULT_PLATFORM = 'Nexus10_4-1_Float_Bench_32'
# Template for gsutil uri.
GOOGLE_STORAGE_URI_SCHEME = 'gs'
URI_BUCKET = 'chromium-skia-gm'
# Maximum number of rows of tiles to track for viewport covering.
MAX_TILE_ROWS = 8
# Constants for optparse.
USAGE_STRING = 'USAGE: %s [options]'
HOWTO_STRING = """
Note: to read bench data stored in Google Storage, you will need to set up the
corresponding Python library.
See http://developers.google.com/storage/docs/gspythonlibrary for details.
"""
HELP_STRING = """
For the given platform and revision number, find corresponding viewport and
tile benchmarks for each available picture bench, and output visualization and
analysis in HTML. By default it reads from Skia's Google Storage location where
bot data are stored, but if --dir is given, will read from local directory
instead.
""" + HOWTO_STRING
OPTION_DIR = '--dir'
OPTION_DIR_SHORT = '-d'
OPTION_REVISION = '--rev'
OPTION_REVISION_SHORT = '-r'
OPTION_PLATFORM = '--platform'
OPTION_PLATFORM_SHORT = '-p'
# Bench representation algorithm flag.
OPTION_REPRESENTATION_ALG = '--algorithm'
OPTION_REPRESENTATION_ALG_SHORT = '-a'
# Bench representation algorithm. See trunk/bench/bench_util.py.
REPRESENTATION_ALG = bench_util.ALGORITHM_25TH_PERCENTILE
# Constants for bench file matching.
GOOGLE_STORAGE_OBJECT_NAME_PREFIX = 'perfdata/Skia_'
BENCH_FILE_PREFIX_TEMPLATE = 'bench_r%s_'
TILING_FILE_NAME_INDICATOR = '_tile_'
VIEWPORT_FILE_NAME_INDICATOR = '_viewport_'
# Regular expression for matching format '<integer>x<integer>'.
DIMENSIONS_RE = '(\d+)x(\d+)'
# HTML and JS output templates.
HTML_PREFIX = """
<html><head><script type="text/javascript" src="https://www.google.com/jsapi">
</script><script type="text/javascript">google.load("visualization", "1.1",
{packages:["table"]});google.load("prototype", "1.6");</script>
<script type="text/javascript" src="https://systemsbiology-visualizations.googlecode.com/svn/trunk/src/main/js/load.js"></script><script
type="text/javascript"> systemsbiology.load("visualization", "1.0",
{packages:["bioheatmap"]});</script><script type="text/javascript">
google.setOnLoadCallback(drawVisualization); function drawVisualization() {
"""
HTML_SUFFIX = '</body></html>'
BAR_CHART_TEMPLATE = ('<img src="https://chart.googleapis.com/chart?chxr=0,0,'
'300&chxt=x&chbh=15,0&chs=600x150&cht=bhg&chco=80C65A,224499,FF0000,0A8C8A,'
'EBB671,DE091A,000000,00ffff&chds=a&chdl=%s&chd=t:%s" /><br>\n')
DRAW_OPTIONS = ('{passThroughBlack:false,useRowLabels:false,cellWidth:30,'
'cellHeight:30}')
TABLE_OPTIONS = '{showRowNumber:true,firstRowNumber:" ",sort:"disable"}'
def GetFiles(rev, bench_dir, platform):
  """Reads in bench files of interest into a dictionary.

  If bench_dir is not empty, tries to read in local bench files; otherwise
  check Google Storage. Filters files by revision (rev) and platform, and
  ignores non-tile, non-viewport bench files.

  Raises an Exception if no matching file is found in either location.

  Outputs dictionary [filename] -> [file content].
  """
  file_dic = {}
  if not bench_dir:
    uri = boto.storage_uri(URI_BUCKET, GOOGLE_STORAGE_URI_SCHEME)
    # The boto API does not allow prefix/wildcard matching of Google Storage
    # objects. And Google Storage has a flat structure instead of being
    # organized in directories. Therefore, we have to scan all objects in the
    # Google Storage bucket to find the files we need, which is slow.
    # The option of implementing prefix matching as in gsutil seems to be
    # overkill, but gsutil does not provide an API ready for use. If speed is a
    # big concern, we suggest copying bot bench data from Google Storage using
    # gsutil and use --log_dir for fast local data reading.
    for obj in uri.get_bucket():
      # Filters out files of no interest.
      if (not obj.name.startswith(GOOGLE_STORAGE_OBJECT_NAME_PREFIX) or
          (obj.name.find(TILING_FILE_NAME_INDICATOR) < 0 and
           obj.name.find(VIEWPORT_FILE_NAME_INDICATOR) < 0) or
          obj.name.find(platform) < 0 or
          obj.name.find(BENCH_FILE_PREFIX_TEMPLATE % rev) < 0):
        continue
      # Key on the bare filename: strip the pseudo-directory path prefix.
      file_dic[
          obj.name[obj.name.rfind('/') + 1 : ]] = obj.get_contents_as_string()
  else:
    for f in os.listdir(bench_dir):
      # Same filtering as above, for local files.
      if (not os.path.isfile(os.path.join(bench_dir, f)) or
          (f.find(TILING_FILE_NAME_INDICATOR) < 0 and
           f.find(VIEWPORT_FILE_NAME_INDICATOR) < 0) or
          not f.startswith(BENCH_FILE_PREFIX_TEMPLATE % rev)):
        continue
      file_dic[f] = open(os.path.join(bench_dir, f)).read()
  if not file_dic:
    raise Exception('No bench file found in "%s" or Google Storage.' %
                    bench_dir)
  return file_dic
def GetTileMatrix(layout, tile_size, values, viewport):
  """For the given tile layout and per-tile bench values, returns a matrix of
  bench values with tiles outside the given viewport set to 0.

  layout, tile_size and viewport are given in string of format <w>x<h>, where
  <w> is viewport width or number of tile columns, and <h> is viewport height
  or number of tile rows. We truncate tile rows to MAX_TILE_ROWS to adjust for
  very long skp's.

  values: per-tile benches ordered row-by-row, starting from the top-left tile.

  Returns [sum, matrix] where sum is the total bench tile time that covers the
  viewport, and matrix is used for visualizing the tiles.
  """
  tile_cols, tile_rows = (int(part) for part in layout.split('x'))
  tile_w, tile_h = (int(part) for part in tile_size.split('x'))
  view_w, view_h = (int(part) for part in viewport.split('x'))
  # How many tile columns/rows it takes to cover the viewport.
  cols_needed = int(math.ceil(view_w * 1.0 / tile_w))
  rows_needed = int(math.ceil(view_h * 1.0 / tile_h))
  rows_kept = min(tile_rows, MAX_TILE_ROWS)
  covering_sum = 0
  matrix = [[0] * tile_cols for _ in range(rows_kept)]
  # Column-major walk, matching the original accumulation order exactly so
  # float summation is bit-for-bit identical.
  for col in range(min(cols_needed, tile_cols)):
    for row in range(min(rows_kept, rows_needed)):
      cell = values[row * tile_cols + col]
      matrix[row][col] = cell
      covering_sum += cell
  return [covering_sum, matrix]
def GetTileVisCodes(suffix, matrix):
  """Generates and returns strings of [js_codes, row1, row2] which are codes
  for visualizing the benches from the given tile config and matrix data.

  row1 is used for the first row of heatmaps; row2 is for corresponding tables.
  suffix is only used to avoid name conflicts in the whole html output.
  """
  parts = ['var data_%s=new google.visualization.DataTable();' % suffix]
  parts.extend('data_%s.addColumn("number","%s");' % (suffix, col)
               for col in range(len(matrix[0])))
  parts.append('data_%s.addRows(%s);' % (suffix, str(matrix)))
  # Heatmap chart.
  parts.append('var heat_%s=new org.systemsbiology.visualization'
               '.BioHeatMap(document.getElementById("%s"));'
               'heat_%s.draw(data_%s,%s);'
               % (suffix, suffix, suffix, suffix, DRAW_OPTIONS))
  # Data table chart.
  parts.append('var table_%s=new google.visualization.Table(document.'
               'getElementById("t%s"));table_%s.draw(data_%s,%s);\n'
               % (suffix, suffix, suffix, suffix, TABLE_OPTIONS))
  heat_cell = '<td>%s<div id="%s"></div></td>' % (suffix, suffix)
  table_cell = '<td><div id="t%s"></div></td>' % suffix
  return [''.join(parts), heat_cell, table_cell]
def OutputTileAnalysis(rev, representation_alg, bench_dir, platform):
  """Reads skp bench data and outputs tile vs. viewport analysis for the given
  platform.

  Ignores data with revisions other than rev. If bench_dir is not empty, read
  from the local directory instead of Google Storage.
  Uses the provided representation_alg for calculating bench representations.

  Returns (js_codes, body_codes): strings of js/html codes for stats and
  visualization.
  """
  js_codes = ''
  body_codes = ('}</script></head><body>'
                '<h3>PLATFORM: %s REVISION: %s</h3><br>' % (platform, rev))
  bench_dic = {}  # [bench][config] -> [layout, [values]]
  file_dic = GetFiles(rev, bench_dir, platform)
  for f in file_dic:
    for point in bench_util.parse('', file_dic[f].split('\n'),
                                  representation_alg):
      if point.time_type:  # Ignores non-walltime time_type.
        continue
      bench = point.bench.replace('.skp', '')
      config = point.config.replace('simple_', '')
      components = config.split('_')
      if components[0] == 'viewport':
        # components[1] is the viewport size string "<w>x<h>".
        bench_dic.setdefault(bench, {})[config] = [components[1], [point.time]]
      else:  # Stores per-tile benches.
        bench_dic.setdefault(bench, {})[config] = [
            point.tile_layout, point.per_tile_values]
  # Python 2: dict.keys() returns a list, sorted in place below.
  benches = bench_dic.keys()
  benches.sort()
  for bench in benches:
    body_codes += '<h4>%s</h4><br><table><tr>' % bench
    heat_plots = ''  # For table row of heatmap plots.
    table_plots = ''  # For table row of data table plots.
    # For bar plot legends and values in URL string.
    legends = ''
    values = ''
    keys = bench_dic[bench].keys()
    keys.sort()
    # 'viewport_*' sorts after 'tile_*', so the last key is a viewport config
    # if any exists for this bench.
    if not keys[-1].startswith('viewport'):  # No viewport to analyze; skip.
      continue
    else:
      # Extracts viewport size, which for all viewport configs is the same.
      viewport = bench_dic[bench][keys[-1]][0]
    for config in keys:
      [layout, value_li] = bench_dic[bench][config]
      if config.startswith('tile_'):  # For per-tile data, visualize tiles.
        tile_size = config.split('_')[1]
        if (not re.search(DIMENSIONS_RE, layout) or
            not re.search(DIMENSIONS_RE, tile_size) or
            not re.search(DIMENSIONS_RE, viewport)):
          continue  # Skip unrecognized formats.
        [viewport_tile_sum, matrix] = GetTileMatrix(
            layout, tile_size, value_li, viewport)
        values += '%s|' % viewport_tile_sum
        [this_js, row1, row2] = GetTileVisCodes(config + '_' + bench, matrix)
        heat_plots += row1
        table_plots += row2
        js_codes += this_js
      else:  # For viewport data, there is only one element in value_li.
        values += '%s|' % sum(value_li)
        legends += '%s:%s|' % (config, sum(value_li))
    # [:-1] strips the trailing '|' separator from legends/values.
    body_codes += (heat_plots + '</tr><tr>' + table_plots + '</tr></table>' +
                   '<br>' + BAR_CHART_TEMPLATE % (legends[:-1], values[:-1]))
  return (js_codes, body_codes)
def main():
  """Parses flags and outputs expected Skia picture bench results."""
  parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
  parser.add_option(OPTION_PLATFORM_SHORT, OPTION_PLATFORM,
      dest='plat', default=DEFAULT_PLATFORM,
      help='Platform to analyze. Set to DEFAULT_PLATFORM if not given.')
  parser.add_option(OPTION_REVISION_SHORT, OPTION_REVISION,
      dest='rev',
      help='(Mandatory) revision number to analyze.')
  parser.add_option(OPTION_DIR_SHORT, OPTION_DIR,
      dest='log_dir', default='',
      help=('(Optional) local directory where bench log files reside. If left '
            'empty (by default), will try to read from Google Storage.'))
  parser.add_option(OPTION_REPRESENTATION_ALG_SHORT, OPTION_REPRESENTATION_ALG,
      dest='alg', default=REPRESENTATION_ALG,
      help=('Bench representation algorithm. '
            'Default to "%s".' % REPRESENTATION_ALG))
  (options, args) = parser.parse_args()
  if not (options.rev and options.rev.isdigit()):
    parser.error('Please provide correct mandatory flag %s' % OPTION_REVISION)
    return
  rev = int(options.rev)
  (js_codes, body_codes) = OutputTileAnalysis(
      rev, options.alg, options.log_dir, options.plat)
  # Python 2 print statement: emits the complete HTML report to stdout.
  print HTML_PREFIX + js_codes + body_codes + HTML_SUFFIX
if '__main__' == __name__:
main()
|
trondeau/gnuradio-old | refs/heads/master | gr-utils/python/utils/plot_fft_base.py | 53 | #!/usr/bin/env python
#
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_fft_base:
    """Interactive viewer for a GNU Radio binary sample file.

    Plots one block of I&Q samples versus time alongside its FFT, and pages
    through the file with buttons or keyboard. Plotting names (figure,
    figtext, Button, connect, draw, show, ...) come from 'from pylab import *'
    at module level.
    """
    def __init__(self, datatype, filename, options):
        self.hfile = open(filename, "r")
        self.block_length = options.block
        self.start = options.start
        self.sample_rate = options.sample_rate
        self.datatype = getattr(scipy, datatype)
        self.sizeof_data = self.datatype().nbytes    # number of bytes per sample in file
        self.axis_font_size = 16
        self.label_font_size = 18
        self.title_font_size = 20
        self.text_size = 22
        # Setup PLOT
        self.fig = figure(1, figsize=(16, 12), facecolor='w')
        rcParams['xtick.labelsize'] = self.axis_font_size
        rcParams['ytick.labelsize'] = self.axis_font_size
        self.text_file          = figtext(0.10, 0.94, ("File: %s" % filename), weight="heavy", size=self.text_size)
        self.text_file_pos      = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
        self.text_block         = figtext(0.35, 0.88, ("Block Size: %d" % self.block_length),
                                          weight="heavy", size=self.text_size)
        self.text_sr            = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
                                          weight="heavy", size=self.text_size)
        self.make_plots()
        # Paging buttons ("<" back, ">" forward).
        self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
        self.button_left = Button(self.button_left_axes, "<")
        self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
        self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
        self.button_right = Button(self.button_right_axes, ">")
        self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
        # xlim from the I&Q plot; used by zoom() to detect axis changes.
        self.xlim = self.sp_iq.get_xlim()
        self.manager = get_current_fig_manager()
        connect('draw_event', self.zoom)
        connect('key_press_event', self.click)
        show()
    def get_data(self):
        """Read the next block from the file and recompute FFT/time/freq."""
        self.position = self.hfile.tell()/self.sizeof_data
        self.text_file_pos.set_text("File Position: %d" % (self.position))
        try:
            self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
        # NOTE(review): EOF is handled here as MemoryError from scipy.fromfile
        # — verify against the scipy version in use.
        except MemoryError:
            print "End of File"
        else:
            self.iq_fft = self.dofft(self.iq)
            tstep = 1.0 / self.sample_rate
            #self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
            self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
            self.freq = self.calc_freq(self.time, self.sample_rate)
    def dofft(self, iq):
        """Return the shifted FFT of iq in dB (power, 1/N normalized)."""
        N = len(iq)
        iq_fft = scipy.fftpack.fftshift(scipy.fft(iq))       # fft and shift axis
        iq_fft = 20*scipy.log10(abs((iq_fft+1e-15)/N))       # convert to decibels, adjust power
        # adding 1e-15 (-300 dB) to protect against value errors if an item in iq_fft is 0
        return iq_fft
    def calc_freq(self, time, sample_rate):
        """Return the frequency axis (centered on 0) for the given time axis."""
        N = len(time)
        Fs = 1.0 / (time.max() - time.min())
        Fn = 0.5 * sample_rate
        freq = scipy.array([-Fn + i*Fs for i in xrange(N)])
        return freq
    def make_plots(self):
        """Create the I&Q and FFT subplots and draw the first block."""
        # if specified on the command-line, set file pointer
        self.hfile.seek(self.sizeof_data*self.start, 1)
        # Subplot for real and imaginary parts of signal
        self.sp_iq = self.fig.add_subplot(2,2,1, position=[0.075, 0.2, 0.4, 0.6])
        self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
        # Subplot for FFT plot
        self.sp_fft = self.fig.add_subplot(2,2,2, position=[0.575, 0.2, 0.4, 0.6])
        self.sp_fft.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_fft.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_fft.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
        self.get_data()
        self.plot_iq  = self.sp_iq.plot([], 'bo-') # make plot for reals
        self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
        self.draw_time()                           # draw the plot
        self.plot_fft = self.sp_fft.plot([], 'bo-') # make plot for FFT
        self.draw_fft()                             # draw the plot
        draw()
    def draw_time(self):
        """Refresh the I&Q time-domain traces and rescale the axes."""
        reals = self.iq.real
        imags = self.iq.imag
        self.plot_iq[0].set_data([self.time, reals])
        self.plot_iq[1].set_data([self.time, imags])
        self.sp_iq.set_xlim(self.time.min(), self.time.max())
        self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
                             1.5*max([reals.max(), imags.max()])])
    def draw_fft(self):
        """Refresh the FFT trace and rescale the axes (10 dB margin)."""
        self.plot_fft[0].set_data([self.freq, self.iq_fft])
        self.sp_fft.set_xlim(self.freq.min(), self.freq.max())
        self.sp_fft.set_ylim([self.iq_fft.min()-10, self.iq_fft.max()+10])
    def update_plots(self):
        """Redraw both plots for the current block."""
        self.draw_time()
        self.draw_fft()
        self.xlim = self.sp_iq.get_xlim()
        draw()
    def zoom(self, event):
        """On an x-axis zoom of the time plot, recompute the FFT of the
        visible sample range only."""
        newxlim = scipy.array(self.sp_iq.get_xlim())
        curxlim = scipy.array(self.xlim)
        if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
            self.xlim = newxlim
            #xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
            #xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
            xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0]))))
            xmax = min(int(ceil(self.sample_rate*(self.xlim[1]))), len(self.iq))
            iq = self.iq[xmin : xmax]
            time = self.time[xmin : xmax]
            iq_fft = self.dofft(iq)
            freq = self.calc_freq(time, self.sample_rate)
            self.plot_fft[0].set_data(freq, iq_fft)
            self.sp_fft.axis([freq.min(), freq.max(),
                              iq_fft.min()-10, iq_fft.max()+10])
            draw()
    def click(self, event):
        """Keyboard paging: space/down/right forward, up/left backward."""
        forward_valid_keys = [" ", "down", "right"]
        backward_valid_keys = ["up", "left"]
        if(find(event.key, forward_valid_keys)):
            self.step_forward()
        elif(find(event.key, backward_valid_keys)):
            self.step_backward()
    def button_left_click(self, event):
        self.step_backward()
    def button_right_click(self, event):
        self.step_forward()
    def step_forward(self):
        """Advance one block and redraw."""
        self.get_data()
        self.update_plots()
    def step_backward(self):
        """Rewind one block (two blocks back from the read position, clamped
        to the start of file) and redraw."""
        # Step back in file position
        if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
            self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
        else:
            self.hfile.seek(-self.hfile.tell(),1)
        self.get_data()
        self.update_plots()
    @staticmethod
    def setup_options():
        """Build and return the OptionParser for the command-line interface."""
        usage="%prog: [options] input_filename"
        description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time as well as the frequency domain (FFT) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. This value defaults to 1000. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
        parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
        parser.add_option("-d", "--data-type", type="string", default="complex64",
                          help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
        parser.add_option("-B", "--block", type="int", default=1000,
                          help="Specify the block size [default=%default]")
        parser.add_option("-s", "--start", type="int", default=0,
                          help="Specify where to start in the file [default=%default]")
        parser.add_option("-R", "--sample-rate", type="float", default=1.0,
                          help="Set the sampler rate of the data [default=%default]")
        return parser
def find(item_in, list_search):
    """Return True if item_in occurs in list_search, else False.

    The original implementation compared list.index() against None, which is
    never false for a found item (index returns an int or raises ValueError);
    plain membership is the idiomatic, equivalent test.
    """
    return item_in in list_search
def main():
    """Parse command-line arguments and launch the FFT plot GUI."""
    parser = plot_fft_base.setup_options()
    (options, args) = parser.parse_args()
    if len(args) != 1:
        # Exactly one input filename is required.
        parser.print_help()
        raise SystemExit(1)
    filename = args[0]
    dc = plot_fft_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
kontais/EFI-MIPS | refs/heads/master | ToolKit/cmds/python/Lib/xmllib.py | 160 | """A parser for XML, using the derived class as static DTD."""
# Author: Sjoerd Mullender.
import re
import string
import warnings
warnings.warn("The xmllib module is obsolete. Use xml.sax instead.", DeprecationWarning)
del warnings
version = '0.3'
class Error(RuntimeError):
    """Exception raised for XML syntax errors detected by this module."""
    pass
# Regular expressions used for parsing
_S = '[ \t\r\n]+'                       # white space
_opS = '[ \t\r\n]*'                     # optional white space
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*'    # valid XML name
_QStr = "(?:'[^']*'|\"[^\"]*\")"        # quoted XML string
illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content
interesting = re.compile('[]&<]')
amp = re.compile('&')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S + '$')
newline = re.compile('\n')
attrfind = re.compile(
    _S + '(?P<name>' + _Name + ')'
    '(' + _opS + '=' + _opS +
    '(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
starttagopen = re.compile('<' + _Name)
starttagend = re.compile(_opS + '(?P<slash>/?)>')
starttagmatch = re.compile('<(?P<tagname>'+_Name+')'
                           '(?P<attrs>(?:'+attrfind.pattern+')*)'+
                           starttagend.pattern)
endtagopen = re.compile('</')
endbracket = re.compile(_opS + '>')
endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
# this matches one of the following:
# SYSTEM SystemLiteral
# PUBLIC PubidLiteral SystemLiteral
_SystemLiteral = '(?P<%s>'+_QStr+')'
_PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \
                 "'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')"
_ExternalId = '(?:SYSTEM|' \
                 'PUBLIC'+_S+_PublicLiteral%'pubid'+ \
              ')'+_S+_SystemLiteral%'syslit'
doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')'
                     '(?:'+_S+_ExternalId+')?'+_opS)
xmldecl = re.compile('<\?xml'+_S+
                     'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+
                     '(?:'+_S+'encoding'+_opS+'='+_opS+
                        "(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
                        '"[A-Za-z][-A-Za-z0-9._]*"))?'
                     '(?:'+_S+'standalone'+_opS+'='+_opS+
                        '(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+
                     _opS+'\?>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
procclose = re.compile(_opS + r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
# Map each of the four whitespace characters to a plain space when
# normalizing attribute values.  string.maketrans requires equal-length
# arguments: the second argument must be four spaces (the run of spaces
# had been collapsed to one in the corrupted copy, which raises
# ValueError at import time).
attrtrans = string.maketrans(' \r\n\t', '    ')
# definitions for XML namespaces
_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*'    # XML Name, minus the ":"
ncname = re.compile(_NCName + '$')
qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix
                   '(?P<local>' + _NCName + ')$')
xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$')
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks).
class XMLParser:
    """XML parser base class -- finds tags and calls handler functions.

    Usage: p = XMLParser(); p.feed(data); ...; p.close().
    The "dtd" is defined by deriving a class that defines methods with
    special names to handle tags: start_foo and end_foo handle <foo> and
    </foo> respectively; character data is passed to handle_data().
    """
    attributes = {}                     # default, to be overridden
    elements = {}                       # default, to be overridden

    # parsing options, settable using keyword args in __init__
    __accept_unquoted_attributes = 0
    __accept_missing_endtag_name = 0
    __map_case = 0
    __accept_utf8 = 0
    __translate_attribute_references = 1

    # Interface -- initialize and reset this instance
    def __init__(self, **kw):
        """Create a parser; keyword arguments toggle the options above."""
        self.__fixed = 0
        if 'accept_unquoted_attributes' in kw:
            self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
        if 'accept_missing_endtag_name' in kw:
            self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
        if 'map_case' in kw:
            self.__map_case = kw['map_case']
        if 'accept_utf8' in kw:
            self.__accept_utf8 = kw['accept_utf8']
        if 'translate_attribute_references' in kw:
            self.__translate_attribute_references = kw['translate_attribute_references']
        self.reset()

    def __fixelements(self):
        # Backward compatibility: collect start_*/end_* methods from the
        # instance and the whole class hierarchy into self.elements.
        self.__fixed = 1
        self.elements = {}
        self.__fixdict(self.__dict__)
        self.__fixclass(self.__class__)

    def __fixclass(self, kl):
        self.__fixdict(kl.__dict__)
        for k in kl.__bases__:
            self.__fixclass(k)

    def __fixdict(self, dict):
        for key in dict.keys():
            if key[:6] == 'start_':
                tag = key[6:]
                start, end = self.elements.get(tag, (None, None))
                if start is None:
                    self.elements[tag] = getattr(self, key), end
            elif key[:4] == 'end_':
                tag = key[4:]
                start, end = self.elements.get(tag, (None, None))
                if end is None:
                    self.elements[tag] = start, getattr(self, key)

    # Interface -- reset this instance.  Loses all unprocessed data
    def reset(self):
        """Reset parser state; discards any unprocessed data."""
        self.rawdata = ''
        self.stack = []
        self.nomoretags = 0
        self.literal = 0
        self.lineno = 1
        self.__at_start = 1
        self.__seen_doctype = None
        self.__seen_starttag = 0
        self.__use_namespaces = 0
        self.__namespaces = {'xml':None}   # xml is implicitly declared
        # backward compatibility hack: if elements not overridden,
        # fill it in ourselves
        if self.elements is XMLParser.elements:
            self.__fixelements()

    # For derived classes only -- enter literal mode (CDATA) till EOF
    def setnomoretags(self):
        self.nomoretags = self.literal = 1

    # For derived classes only -- enter literal mode (CDATA)
    def setliteral(self, *args):
        self.literal = 1

    # Interface -- feed some data to the parser.  Call this as
    # often as you want, with as little or as much text as you
    # want (may include '\n').  (This just saves the text, all the
    # processing is done by goahead().)
    def feed(self, data):
        self.rawdata = self.rawdata + data
        self.goahead(0)

    # Interface -- handle the remaining data
    def close(self):
        """Process all remaining buffered data as if at EOF."""
        self.goahead(1)
        if self.__fixed:
            self.__fixed = 0
            # remove self.elements so that we don't leak
            del self.elements

    # Interface -- translate references
    def translate_references(self, data, all = 1):
        """Expand character and (if 'all') entity references in 'data'."""
        if not self.__translate_attribute_references:
            return data
        i = 0
        while 1:
            res = amp.search(data, i)
            if res is None:
                return data
            s = res.start(0)
            res = ref.match(data, s)
            if res is None:
                self.syntax_error("bogus `&'")
                i = s+1
                continue
            i = res.end(0)
            str = res.group(1)
            rescan = 0
            if str[0] == '#':
                if str[1] == 'x':
                    str = chr(int(str[2:], 16))
                else:
                    str = chr(int(str[1:]))
                if data[i - 1] != ';':
                    self.syntax_error("`;' missing after char reference")
                    i = i-1
            elif all:
                if str in self.entitydefs:
                    str = self.entitydefs[str]
                    # entity replacement text may itself contain references
                    rescan = 1
                elif data[i - 1] != ';':
                    self.syntax_error("bogus `&'")
                    i = s + 1 # just past the &
                    continue
                else:
                    self.syntax_error("reference to unknown entity `&%s;'" % str)
                    str = '&' + str + ';'
            elif data[i - 1] != ';':
                self.syntax_error("bogus `&'")
                i = s + 1 # just past the &
                continue
            # when we get here, str contains the translated text and i points
            # to the end of the string that is to be replaced
            data = data[:s] + str + data[i:]
            if rescan:
                i = s
            else:
                i = s + len(str)

    # Interface - return a dictionary of all namespaces currently valid
    def getnamespace(self):
        nsdict = {}
        for t, d, nst in self.stack:
            nsdict.update(d)
        return nsdict

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if i > 0:
                self.__at_start = 0
            if self.nomoretags:
                data = rawdata[i:n]
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
                i = n
                break
            res = interesting.search(rawdata, i)
            if res:
                j = res.start(0)
            else:
                j = n
            if i < j:
                data = rawdata[i:j]
                if self.__at_start and space.match(data) is None:
                    self.syntax_error('illegal data at start of file')
                self.__at_start = 0
                if not self.stack and space.match(data) is None:
                    self.syntax_error('data not in content')
                if not self.__accept_utf8 and illegal.search(data):
                    self.syntax_error('illegal character in content')
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break
                    self.__seen_starttag = 1
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if endtagopen.match(rawdata, i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if commentopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    k = self.parse_comment(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if cdataopen.match(rawdata, i):
                    k = self.parse_cdata(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                res = xmldecl.match(rawdata, i)
                if res:
                    if not self.__at_start:
                        self.syntax_error("<?xml?> declaration not at start of document")
                    version, encoding, standalone = res.group('version',
                                                              'encoding',
                                                              'standalone')
                    if version[1:-1] != '1.0':
                        raise Error('only XML version 1.0 supported')
                    if encoding: encoding = encoding[1:-1]
                    if standalone: standalone = standalone[1:-1]
                    self.handle_xml(encoding, standalone)
                    i = res.end(0)
                    continue
                res = procopen.match(rawdata, i)
                if res:
                    k = self.parse_proc(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                res = doctype.match(rawdata, i)
                if res:
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    if self.__seen_doctype:
                        self.syntax_error('multiple DOCTYPE elements')
                    if self.__seen_starttag:
                        self.syntax_error('DOCTYPE not at beginning of document')
                    k = self.parse_doctype(res)
                    if k < 0: break
                    self.__seen_doctype = res.group('name')
                    if self.__map_case:
                        self.__seen_doctype = self.__seen_doctype.lower()
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
            elif rawdata[i] == '&':
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    i = i+1
                    continue
                res = charref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error("`;' missing in charref")
                        i = i-1
                    if not self.stack:
                        self.syntax_error('data not in content')
                    self.handle_charref(res.group('char')[:-1])
                    self.lineno = self.lineno + res.group(0).count('\n')
                    continue
                res = entityref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error("`;' missing in entityref")
                        i = i-1
                    name = res.group('name')
                    if self.__map_case:
                        name = name.lower()
                    if name in self.entitydefs:
                        # splice in the replacement text and rescan it
                        self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
                        n = len(rawdata)
                        i = res.start(0)
                    else:
                        self.unknown_entityref(name)
                    self.lineno = self.lineno + res.group(0).count('\n')
                    continue
            elif rawdata[i] == ']':
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    i = i+1
                    continue
                if n-i < 3:
                    break
                if cdataclose.match(rawdata, i):
                    self.syntax_error("bogus `]]>'")
                self.handle_data(rawdata[i])
                i = i+1
                continue
            else:
                raise Error('neither < nor & ??')
            # We get here only if incomplete matches but
            # nothing else
            break
        # end while
        if i > 0:
            self.__at_start = 0
        if end and i < n:
            data = rawdata[i]
            self.syntax_error("bogus `%s'" % data)
            if not self.__accept_utf8 and illegal.search(data):
                self.syntax_error('illegal character in content')
            self.handle_data(data)
            self.lineno = self.lineno + data.count('\n')
            self.rawdata = rawdata[i+1:]
            return self.goahead(end)
        self.rawdata = rawdata[i:]
        if end:
            if not self.__seen_starttag:
                self.syntax_error('no elements in file')
            if self.stack:
                self.syntax_error('missing end tags')
                while self.stack:
                    self.finish_endtag(self.stack[-1][0])

    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            raise Error('unexpected call to handle_comment')
        res = commentclose.search(rawdata, i+4)
        if res is None:
            return -1
        if doubledash.search(rawdata, i+4, res.start(0)):
            self.syntax_error("`--' inside comment")
        if rawdata[res.start(0)-1] == '-':
            self.syntax_error('comment cannot end in three dashes')
        if not self.__accept_utf8 and \
           illegal.search(rawdata, i+4, res.start(0)):
            self.syntax_error('illegal character in comment')
        self.handle_comment(rawdata[i+4: res.start(0)])
        return res.end(0)

    # Internal -- handle DOCTYPE tag, return length or -1 if not terminated
    def parse_doctype(self, res):
        rawdata = self.rawdata
        n = len(rawdata)
        name = res.group('name')
        if self.__map_case:
            name = name.lower()
        pubid, syslit = res.group('pubid', 'syslit')
        if pubid is not None:
            pubid = pubid[1:-1]         # remove quotes
            pubid = ' '.join(pubid.split()) # normalize
        if syslit is not None: syslit = syslit[1:-1] # remove quotes
        j = k = res.end(0)
        if k >= n:
            return -1
        if rawdata[k] == '[':
            # skip over the internal subset, tracking quoting and nesting
            level = 0
            k = k+1
            dq = sq = 0
            while k < n:
                c = rawdata[k]
                if not sq and c == '"':
                    dq = not dq
                elif not dq and c == "'":
                    sq = not sq
                elif sq or dq:
                    pass
                elif level <= 0 and c == ']':
                    res = endbracket.match(rawdata, k+1)
                    if res is None:
                        return -1
                    self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
                    return res.end(0)
                elif c == '<':
                    level = level + 1
                elif c == '>':
                    level = level - 1
                    if level < 0:
                        self.syntax_error("bogus `>' in DOCTYPE")
                k = k+1
        res = endbracketfind.match(rawdata, k)
        if res is None:
            return -1
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in DOCTYPE')
        self.handle_doctype(name, pubid, syslit, None)
        return res.end(0)

    # Internal -- handle CDATA tag, return length or -1 if not terminated
    def parse_cdata(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+9] != '<![CDATA[':
            raise Error('unexpected call to parse_cdata')
        res = cdataclose.search(rawdata, i+9)
        if res is None:
            return -1
        if not self.__accept_utf8 and \
           illegal.search(rawdata, i+9, res.start(0)):
            self.syntax_error('illegal character in CDATA')
        if not self.stack:
            self.syntax_error('CDATA not in content')
        self.handle_cdata(rawdata[i+9:res.start(0)])
        return res.end(0)

    __xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
    # Internal -- handle a processing instruction tag
    def parse_proc(self, i):
        rawdata = self.rawdata
        end = procclose.search(rawdata, i)
        if end is None:
            return -1
        j = end.start(0)
        if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
            self.syntax_error('illegal character in processing instruction')
        res = tagfind.match(rawdata, i+2)
        if res is None:
            raise Error('unexpected call to parse_proc')
        k = res.end(0)
        name = res.group(0)
        if self.__map_case:
            name = name.lower()
        if name == 'xml:namespace':
            self.syntax_error('old-fashioned namespace declaration')
            self.__use_namespaces = -1
            # namespace declaration
            # this must come after the <?xml?> declaration (if any)
            # and before the <!DOCTYPE> (if any).
            if self.__seen_doctype or self.__seen_starttag:
                self.syntax_error('xml:namespace declaration too late in document')
            attrdict, namespace, k = self.parse_attributes(name, k, j)
            if namespace:
                self.syntax_error('namespace declaration inside namespace declaration')
            for attrname in attrdict.keys():
                if not attrname in self.__xml_namespace_attributes:
                    self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
            if not 'ns' in attrdict or not 'prefix' in attrdict:
                self.syntax_error('xml:namespace without required attributes')
            prefix = attrdict.get('prefix')
            if ncname.match(prefix) is None:
                self.syntax_error('xml:namespace illegal prefix value')
                return end.end(0)
            if prefix in self.__namespaces:
                self.syntax_error('xml:namespace prefix not unique')
            self.__namespaces[prefix] = attrdict['ns']
        else:
            if name.lower() == 'xml':
                self.syntax_error('illegal processing instruction target name')
            self.handle_proc(name, rawdata[k:j])
        return end.end(0)

    # Internal -- parse attributes between i and j
    def parse_attributes(self, tag, i, j):
        rawdata = self.rawdata
        attrdict = {}
        namespace = {}
        while i < j:
            res = attrfind.match(rawdata, i)
            if res is None:
                break
            attrname, attrvalue = res.group('name', 'value')
            if self.__map_case:
                attrname = attrname.lower()
            i = res.end(0)
            if attrvalue is None:
                self.syntax_error("no value specified for attribute `%s'" % attrname)
                attrvalue = attrname
            elif attrvalue[:1] == "'" == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            elif not self.__accept_unquoted_attributes:
                self.syntax_error("attribute `%s' value not quoted" % attrname)
            res = xmlns.match(attrname)
            if res is not None:
                # namespace declaration
                ncname = res.group('ncname')
                namespace[ncname or ''] = attrvalue or None
                if not self.__use_namespaces:
                    self.__use_namespaces = len(self.stack)+1
                continue
            if '<' in attrvalue:
                self.syntax_error("`<' illegal in attribute value")
            if attrname in attrdict:
                self.syntax_error("attribute `%s' specified twice" % attrname)
            attrvalue = attrvalue.translate(attrtrans)
            attrdict[attrname] = self.translate_references(attrvalue)
        return attrdict, namespace, i

    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        rawdata = self.rawdata
        # i points to start of tag
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        tag = starttagmatch.match(rawdata, i)
        if tag is None or tag.end(0) != end.end(0):
            self.syntax_error('garbage in starttag')
            return end.end(0)
        nstag = tagname = tag.group('tagname')
        if self.__map_case:
            nstag = tagname = nstag.lower()
        if not self.__seen_starttag and self.__seen_doctype and \
           tagname != self.__seen_doctype:
            self.syntax_error('starttag does not match DOCTYPE')
        if self.__seen_starttag and not self.stack:
            self.syntax_error('multiple elements on top level')
        k, j = tag.span('attrs')
        attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
        self.stack.append((tagname, nsdict, nstag))
        if self.__use_namespaces:
            res = qname.match(tagname)
        else:
            res = None
        if res is not None:
            prefix, nstag = res.group('prefix', 'local')
            if prefix is None:
                prefix = ''
            ns = None
            for t, d, nst in self.stack:
                if prefix in d:
                    ns = d[prefix]
            if ns is None and prefix != '':
                ns = self.__namespaces.get(prefix)
            if ns is not None:
                nstag = ns + ' ' + nstag
            elif prefix != '':
                nstag = prefix + ':' + nstag # undo split
            self.stack[-1] = tagname, nsdict, nstag
        # translate namespace of attributes
        attrnamemap = {} # map from new name to old name (used for error reporting)
        for key in attrdict.keys():
            attrnamemap[key] = key
        if self.__use_namespaces:
            nattrdict = {}
            for key, val in attrdict.items():
                okey = key
                res = qname.match(key)
                if res is not None:
                    aprefix, key = res.group('prefix', 'local')
                    if self.__map_case:
                        key = key.lower()
                    if aprefix is not None:
                        ans = None
                        for t, d, nst in self.stack:
                            if aprefix in d:
                                ans = d[aprefix]
                        if ans is None:
                            ans = self.__namespaces.get(aprefix)
                        if ans is not None:
                            key = ans + ' ' + key
                        else:
                            key = aprefix + ':' + key
                nattrdict[key] = val
                attrnamemap[key] = okey
            attrdict = nattrdict
        attributes = self.attributes.get(nstag)
        if attributes is not None:
            for key in attrdict.keys():
                if not key in attributes:
                    self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname))
            for key, val in attributes.items():
                if val is not None and not key in attrdict:
                    attrdict[key] = val
        method = self.elements.get(nstag, (None, None))[0]
        self.finish_starttag(nstag, attrdict, method)
        if tag.group('slash') == '/':
            self.finish_endtag(tagname)
        return tag.end(0)

    # Internal -- parse endtag
    def parse_endtag(self, i):
        rawdata = self.rawdata
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        res = tagfind.match(rawdata, i+2)
        if res is None:
            if self.literal:
                self.handle_data(rawdata[i])
                return i+1
            if not self.__accept_missing_endtag_name:
                self.syntax_error('no name specified in end tag')
            tag = self.stack[-1][0]
            k = i+2
        else:
            tag = res.group(0)
            if self.__map_case:
                tag = tag.lower()
            if self.literal:
                if not self.stack or tag != self.stack[-1][0]:
                    self.handle_data(rawdata[i])
                    return i+1
            k = res.end(0)
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in end tag')
        self.finish_endtag(tag)
        return end.end(0)

    # Internal -- finish processing of start tag
    def finish_starttag(self, tagname, attrdict, method):
        if method is not None:
            self.handle_starttag(tagname, method, attrdict)
        else:
            self.unknown_starttag(tagname, attrdict)

    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        self.literal = 0
        if not tag:
            self.syntax_error('name-less end tag')
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            found = -1
            for i in range(len(self.stack)):
                if tag == self.stack[i][0]:
                    found = i
            if found == -1:
                self.syntax_error('unopened end tag')
                return
        while len(self.stack) > found:
            if found < len(self.stack) - 1:
                self.syntax_error('missing close tag for %s' % self.stack[-1][2])
            nstag = self.stack[-1][2]
            method = self.elements.get(nstag, (None, None))[1]
            if method is not None:
                self.handle_endtag(nstag, method)
            else:
                self.unknown_endtag(nstag)
            if self.__use_namespaces == len(self.stack):
                self.__use_namespaces = 0
            del self.stack[-1]

    # Overridable -- handle xml processing instruction
    def handle_xml(self, encoding, standalone):
        pass

    # Overridable -- handle DOCTYPE
    def handle_doctype(self, tag, pubid, syslit, data):
        pass

    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        method(attrs)

    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        method()

    # Example -- handle character reference, no need to override
    def handle_charref(self, name):
        try:
            if name[0] == 'x':
                n = int(name[1:], 16)
            else:
                n = int(name)
        except ValueError:
            self.unknown_charref(name)
            return
        if not 0 <= n <= 255:
            self.unknown_charref(name)
            return
        self.handle_data(chr(n))

    # Definition of entities -- derived classes may override.
    # The replacement text is rescanned by translate_references() and
    # goahead(), so the predefined entities MUST be spelled as character
    # references ("must use charref"); literal '<'/'&' replacements would
    # be re-parsed as markup.  (These values were restored from a corrupted
    # copy in which the charrefs had been unescaped, leaving an unterminated
    # ''' literal for 'apos'.)
    entitydefs = {'lt': '&#60;',        # must use charref
                  'gt': '&#62;',
                  'amp': '&#38;',       # must use charref
                  'quot': '&#34;',
                  'apos': '&#39;',
                  }

    # Example -- handle data, should be overridden
    def handle_data(self, data):
        pass

    # Example -- handle cdata, could be overridden
    def handle_cdata(self, data):
        pass

    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        pass

    # Example -- handle processing instructions, could be overridden
    def handle_proc(self, name, data):
        pass

    # Example -- handle relatively harmless syntax errors, could be overridden
    def syntax_error(self, message):
        raise Error('Syntax error at line %d: %s' % (self.lineno, message))

    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, name):
        self.syntax_error("reference to unknown entity `&%s;'" % name)
class TestXMLParser(XMLParser):
    """Demo subclass that prints every parser event; used by test() below."""
    def __init__(self, **kw):
        self.testdata = ""
        XMLParser.__init__(self, **kw)
    def handle_xml(self, encoding, standalone):
        self.flush()
        print 'xml: encoding =',encoding,'standalone =',standalone
    def handle_doctype(self, tag, pubid, syslit, data):
        self.flush()
        print 'DOCTYPE:',tag, repr(data)
    def handle_data(self, data):
        # Buffer character data; flush in chunks to keep output lines short.
        self.testdata = self.testdata + data
        if len(repr(self.testdata)) >= 70:
            self.flush()
    def flush(self):
        data = self.testdata
        if data:
            self.testdata = ""
            print 'data:', repr(data)
    def handle_cdata(self, data):
        self.flush()
        print 'cdata:', repr(data)
    def handle_proc(self, name, data):
        self.flush()
        print 'processing:',name,repr(data)
    def handle_comment(self, data):
        self.flush()
        r = repr(data)
        if len(r) > 68:
            # abbreviate long comments for readable output
            r = r[:32] + '...' + r[-32:]
        print 'comment:', r
    def syntax_error(self, message):
        # Override: report the error and keep parsing instead of raising.
        print 'error at line %d:' % self.lineno, message
    def unknown_starttag(self, tag, attrs):
        self.flush()
        if not attrs:
            print 'start tag: <' + tag + '>'
        else:
            print 'start tag: <' + tag,
            for name, value in attrs.items():
                print name + '=' + '"' + value + '"',
            print '>'
    def unknown_endtag(self, tag):
        self.flush()
        print 'end tag: </' + tag + '>'
    def unknown_entityref(self, ref):
        self.flush()
        print '*** unknown entity ref: &' + ref + ';'
    def unknown_charref(self, ref):
        self.flush()
        print '*** unknown char ref: &#' + ref + ';'
    def close(self):
        XMLParser.close(self)
        self.flush()
def test(args = None):
    # Command-line driver: parse a file (default 'test.xml', '-' = stdin).
    # -s uses the silent base XMLParser instead of TestXMLParser;
    # -t feeds the whole file in one call and reports timing, otherwise the
    # data is fed one character at a time to exercise incremental parsing.
    import sys, getopt
    from time import time
    if not args:
        args = sys.argv[1:]
    opts, args = getopt.getopt(args, 'st')
    klass = TestXMLParser
    do_time = 0
    for o, a in opts:
        if o == '-s':
            klass = XMLParser
        elif o == '-t':
            do_time = 1
    if args:
        file = args[0]
    else:
        file = 'test.xml'
    if file == '-':
        f = sys.stdin
    else:
        try:
            f = open(file, 'r')
        except IOError, msg:
            print file, ":", msg
            sys.exit(1)
    data = f.read()
    if f is not sys.stdin:
        f.close()
    x = klass()
    t0 = time()
    try:
        if do_time:
            x.feed(data)
            x.close()
        else:
            for c in data:
                x.feed(c)
            x.close()
    except Error, msg:
        t1 = time()
        print msg
        if do_time:
            print 'total time: %g' % (t1-t0)
        sys.exit(1)
    t1 = time()
    if do_time:
        print 'total time: %g' % (t1-t0)
if __name__ == '__main__':
    # Run the demo driver when executed as a script.
    test()
|
BEASTLICK-INTERNET-POLICY-COMMISSION/bipcoin | refs/heads/master | external/gtest/test/gtest_shuffle_test.py | 3023 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# Filter used when computing the FILTERED_* lists below.
TEST_FILTER = 'A*.A:A*.B:C*'

# Caches of 'TestCase.TestName' lists, lazily filled in (via extend, so the
# module-level bindings stay stable) by CalculateTestLists().
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that makes Google Test also run disabled tests."""
  return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
  """Returns a --gtest_filter flag selecting the given filter expression."""
  return '--gtest_filter=' + test_filter
def RepeatFlag(n):
  """Returns a --gtest_repeat flag for n repetitions."""
  return '--gtest_repeat=' + str(n)
def ShuffleFlag():
  """Returns the flag that enables test shuffling."""
  return '--gtest_shuffle'
def RandomSeedFlag(n):
  """Returns a --gtest_random_seed flag with seed n."""
  return '--gtest_random_seed=' + str(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output.

  Args:
    extra_env: a map of environment variables merged over os.environ
    args: command line flags to pass to gtest_shuffle_test_
  """
  merged_env = os.environ.copy()
  merged_env.update(extra_env)
  proc = gtest_test_utils.Subprocess([COMMAND] + args, env=merged_env)
  return proc.output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  test_iterations = []
  output = RunAndReturnOutput(extra_env, args)
  for line in output.split('\n'):
    stripped = line.strip()
    if line.startswith('----'):
      # A dashed line marks the start of a new iteration.
      tests = []
      test_iterations.append(tests)
    elif stripped:
      tests.append(stripped)  # 'TestCaseName.TestName'
  return test_iterations
def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names in 'TestCaseName.TestName' form.

  Returns:
    A list of the distinct test case (prefix) names, in order of first
    appearance in 'tests'.  All duplicates are removed, not just
    consecutive ones (the previous docstring claimed otherwise; the
    membership test below deduplicates globally).
  """
  test_cases = []
  for test in tests:
    test_case = test.split('.')[0]
    if test_case not in test_cases:  # 'not in' is the idiomatic form
      test_cases.append(test_case)
  return test_cases
def CalculateTestLists():
  """Calculates the list of tests run under different flags."""
  # Each cache is filled only once (guarded by its truthiness) and mutated
  # in place with extend() so the module-level bindings keep their identity.
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])

  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    # Fills the module-level test-list caches; cheap no-op after the
    # first test method runs.
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    # Shuffling must not add or drop tests in any configuration.
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    # The shuffled order must differ from the declaration order; the
    # second argument is shown as the failure message.
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    # Not just individual tests: the order of whole test cases must
    # change as well.
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    # Each test must appear exactly once in the shuffled run.
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    # Every shuffled test must come from the corresponding unshuffled run.
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    # Conversely, no test from the unshuffled run may be lost.
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    # All '*DeathTest.*' tests must come before any non-death test in
    # the shuffled order (identified by name convention here).
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    """Asserts each test case's tests are contiguous in the run order."""
    # Record each test case whenever it differs from the previous one;
    # seeing the same case twice in that record means its tests were
    # split apart by the shuffle.
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    # Repeating 3 times with shuffling on should produce three distinct
    # orders.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
# Let gtest_test_utils discover and run the TestCase classes above.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
shft117/SteckerApp | refs/heads/master | erpnext/accounts/doctype/mode_of_payment/mode_of_payment.py | 52 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ModeofPayment(Document):
	"""Controller for the 'Mode of Payment' DocType.

	No custom server-side behavior; everything is inherited from
	frappe's Document base class.
	"""
	pass
|
b0ttl3z/SickRage | refs/heads/master | sickbeard/clients/transmission_client.py | 7 | # coding=utf-8
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
import re
from base64 import b64encode
import sickbeard
from sickbeard.clients.generic import GenericClient
class TransmissionAPI(GenericClient):

    def __init__(self, host=None, username=None, password=None):
        """Set up the Transmission RPC client.

        Args:
            host: base URL of the Transmission web interface
            username: HTTP auth user name, if any
            password: HTTP auth password, if any
        """
        super(TransmissionAPI, self).__init__('Transmission', host, username, password)

        # Transmission exposes a single JSON-RPC endpoint at <host>/<rpc-url>/rpc.
        self.url = '/'.join((self.host.rstrip('/'), sickbeard.TORRENT_RPCURL.strip('/'), 'rpc'))

    def _rpc(self, method, arguments=None):
        """POST one JSON-RPC request and report whether the daemon accepted it.

        Shared tail of every command method below.

        Args:
            method: RPC method name, e.g. 'torrent-add' or 'torrent-set'
            arguments: dict of method arguments (defaults to empty)

        Returns:
            True if Transmission answered {"result": "success"}.
        """
        post_data = json.dumps({'arguments': arguments or {},
                                'method': method})
        self._request(method='post', data=post_data)
        return self.response.json()['result'] == "success"

    def _get_auth(self):
        """Obtain the CSRF session id Transmission requires on every request.

        The daemon rejects the first call with the session id embedded in the
        response; we scrape it out and attach it as a header for the session.

        Returns:
            The session id string, or None when the daemon is unreachable or
            no session id could be found (GenericClient treats None as
            authentication failure).
        """
        post_data = json.dumps({'method': 'session-get', })

        try:
            self.response = self.session.post(self.url, data=post_data.encode('utf-8'), timeout=120,
                                              verify=sickbeard.TORRENT_VERIFY_CERT)
            # re.search returns None when the id is absent; the resulting
            # AttributeError is swallowed below along with connection errors.
            self.auth = re.search(r'X-Transmission-Session-Id:\s*(\w+)', self.response.text).group(1)
        except Exception:
            return None

        self.session.headers.update({'x-transmission-session-id': self.auth})

        # Validate the Transmission authorization with a throwaway call.
        self._rpc('session-get')

        return self.auth

    def _paused_flag(self):
        """Return 1 when new torrents should start paused, else 0."""
        return 1 if sickbeard.TORRENT_PAUSED else 0

    def _add_torrent(self, arguments):
        """Apply the configured download dir and issue a 'torrent-add' call."""
        if sickbeard.TORRENT_PATH:
            arguments['download-dir'] = sickbeard.TORRENT_PATH
        return self._rpc('torrent-add', arguments)

    def _add_torrent_uri(self, result):
        """Add a torrent by URL or magnet link."""
        return self._add_torrent({
            'filename': result.url,
            'paused': self._paused_flag()
        })

    def _add_torrent_file(self, result):
        """Add a torrent from the raw contents of its .torrent file."""
        return self._add_torrent({
            # b64encode returns bytes on Python 3, which json.dumps rejects;
            # base64 output is always ASCII, so decoding is safe (and a no-op
            # conversion to unicode on Python 2).
            'metainfo': b64encode(result.content).decode('ascii'),
            'paused': self._paused_flag()
        })

    def _set_torrent_ratio(self, result):
        """Apply the per-result seed-ratio limit.

        A ratio of -1 maps to unlimited seeding (mode 2); any other
        non-negative ratio becomes a hard seedRatioLimit (mode 1); a falsy
        ratio keeps the daemon's global setting (mode 0).
        """
        ratio = result.ratio if result.ratio else None

        mode = 0
        if ratio:
            if float(ratio) == -1:
                ratio = 0
                mode = 2  # seed regardless of ratio
            elif float(ratio) >= 0:
                ratio = float(ratio)
                mode = 1  # stop seeding at seedRatioLimit

        return self._rpc('torrent-set', {'ids': [result.hash],
                                         'seedRatioLimit': ratio,
                                         'seedRatioMode': mode})

    def _set_torrent_seed_time(self, result):
        """Apply the configured idle seed-time limit, if any."""
        if sickbeard.TORRENT_SEED_TIME and sickbeard.TORRENT_SEED_TIME != -1:
            # TORRENT_SEED_TIME appears to be hours; seedIdleLimit takes
            # minutes -- TODO confirm against the settings UI.
            time = int(60 * float(sickbeard.TORRENT_SEED_TIME))
            return self._rpc('torrent-set', {'ids': [result.hash],
                                             'seedIdleLimit': time,
                                             'seedIdleMode': 1})
        else:
            return True

    def _set_torrent_priority(self, result):
        """Translate SickRage's -1/0/1 priority onto Transmission settings."""
        arguments = {'ids': [result.hash]}

        if result.priority == -1:
            arguments['priority-low'] = []  # empty list means "all files"
        elif result.priority == 1:
            # Set high priority for all files in the torrent ...
            arguments['priority-high'] = []
            # ... and move the torrent to the top of the queue.
            arguments['queuePosition'] = 0
            if sickbeard.TORRENT_HIGH_BANDWIDTH:
                arguments['bandwidthPriority'] = 1
        else:
            arguments['priority-normal'] = []

        return self._rpc('torrent-set', arguments)


api = TransmissionAPI()
|
bgris/ODL_bgris | refs/heads/master | lib/python3.5/site-packages/numpy/distutils/fcompiler/none.py | 229 | from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['NoneFCompiler']
class NoneFCompiler(FCompiler):
    """A do-nothing Fortran compiler stub.

    Used when no real Fortran compiler is available or desired: every
    tool slot is declared but deliberately left unset.
    """

    compiler_type = 'none'
    description = 'Fake Fortran compiler'

    # Same key set as a real FCompiler, but every executable is absent.
    executables = dict.fromkeys(
        ['compiler_f77', 'compiler_f90', 'compiler_fix',
         'linker_so', 'linker_exe', 'archiver', 'ranlib',
         'version_cmd'],
        None)

    def find_executables(self):
        """No executables to locate; skip the base-class probing."""
        pass
# Manual smoke test: instantiate the stub compiler and print its
# (non-existent) version with verbose distutils logging.
if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    compiler = NoneFCompiler()
    compiler.customize()
    print(compiler.get_version())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.