| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
gj1292/Plexus | refs/heads/master | graph.py | 1 |
__author__ = "gjoshi"
class Graph(object):
def __init__(self):
self.no_of_nodes = 0
self.no_of_edges = 0
self.nodes = []
self.edges = []
self.adj = {}
def add_node(self, node):
    self.nodes.append(node)
    self.adj[node] = []
    # keep the node counter in sync with the node list
    self.no_of_nodes += 1
def add_nodes_from(self, list_of_nodes):
    for node in list_of_nodes:
        self.add_node(node)
def add_edge(self, source, destination):
    self.adj[source].append(destination)
    self.adj[destination].append(source)
    # record the edge and keep the edge counter in sync
    self.edges.append((source, destination))
    self.no_of_edges += 1
def nodes_iter(self):
return iter(self.nodes)
|
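A minimal usage sketch for the `Graph` class above; the node names are illustrative, and the sketch assumes the class definition is in scope:

```python
# Build a small undirected graph and inspect its adjacency lists.
g = Graph()
g.add_nodes_from(['a', 'b', 'c'])   # registers nodes with empty adjacency lists
g.add_edge('a', 'b')                # undirected: recorded on both endpoints
g.add_edge('b', 'c')
for node in g.nodes_iter():
    print(node, '->', g.adj[node])
# a -> ['b']
# b -> ['a', 'c']
# c -> ['b']
```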
LeoTestard/rust | refs/heads/master | src/etc/extract-tests.py | 1 |
# xfail-license
# Script for extracting compilable fragments from markdown
# documentation. See prep.js for a description of the format
# recognized by this tool. Expects a directory fragments/ to exist
# under the current directory, and writes the fragments in there as
# individual .rs files.
import sys, re
if len(sys.argv) < 3:
    print("Please provide an input filename and a destination directory")
    sys.exit(1)
filename = sys.argv[1]
dest = sys.argv[2]
f = open(filename)
lines = f.readlines()
f.close()
cur = 0
line = ""
chapter = ""
chapter_n = 0
while cur < len(lines):
line = lines[cur]
cur += 1
chap = re.match("# (.*)", line)
if chap:
chapter = re.sub(r"\W", "_", chap.group(1)).lower()
chapter_n = 1
elif re.match("~~~", line):
# Parse the tags that open a code block in the pandoc format:
# ~~~ {.tag1 .tag2}
tags = re.findall(r"\.([\w-]*)", line)
block = ""
ignore = "notrust" in tags or "ignore" in tags
# Some tags used by the language ref indicate the block is not Rust
ignore |= "ebnf" in tags
ignore |= "abnf" in tags
ignore |= "keyword" in tags
ignore |= "field" in tags
ignore |= "precedence" in tags
xfail = "xfail-test" in tags
while cur < len(lines):
line = lines[cur]
cur += 1
if re.match("~~~", line):
break
else:
# Lines beginning with '# ' are turned into valid code
line = re.sub("^# ", "", line)
# Allow ellipses in code snippets
line = re.sub("\.\.\.", "", line)
block += line
if not ignore:
if not re.search(r"\bfn main\b", block):
block = "fn main() {\n" + block + "\n}\n"
if not re.search(r"\bextern mod extra\b", block):
block = "extern mod extra;\n" + block
block = """#[ deny(warnings) ];
#[ allow(unused_variable) ];\n
#[ allow(dead_assignment) ];\n
#[ allow(unused_mut) ];\n
#[ feature(macro_rules, globs, struct_variant) ];\n
""" + block
if xfail:
block = "// xfail-test\n" + block
filename = (dest + "/" + str(chapter)
+ "_" + str(chapter_n) + ".rs")
chapter_n += 1
f = open(filename, 'w')
f.write(block)
f.close()
|
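The fence-tag parsing is the heart of the script above; a small self-contained sketch of how the pandoc attribute regex behaves (the sample line is illustrative):

```python
import re

# How the tag regex decomposes a pandoc-style fence line.
line = "~~~ {.rust .xfail-test}"
tags = re.findall(r"\.([\w-]*)", line)
print(tags)                      # ['rust', 'xfail-test']
print("xfail-test" in tags)      # True: the block gets a leading // xfail-test
```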
MechCoder/scikit-learn | refs/heads/master | examples/mixture/plot_gmm_pdf.py | 140 |
"""
=========================================
Density Estimation for a Gaussian mixture
=========================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20., 30.)
y = np.linspace(-20., 40.)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
|
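One detail worth noting in the example above: `score_samples` returns the per-sample log-likelihood, so `Z = -clf.score_samples(XX)` is a negative log-likelihood surface, lowest near the component centers. A tiny sketch of that sign convention, with illustrative data:

```python
import numpy as np
from sklearn import mixture

# score_samples returns log p(x); negating it yields the contoured quantity.
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
gmm = mixture.GaussianMixture(n_components=1).fit(X)
log_lik = gmm.score_samples(X[:5])  # log-likelihood of the first 5 samples
print(-log_lik)                     # negative log-likelihood, as plotted above
```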
TTimo/es_core | refs/heads/master | site_scons/site_init.py | 2 |
# scons imports this module at startup. see http://www.scons.org/doc/production/HTML/scons-user/x3822.html
import os, platform
( system, node, release, version, machine, processor ) = platform.uname()
from SCons.Script import *
def _DoAssembleBundle( target, source, env ):
topdir = os.getcwd()
try:
print('create the bundle structure for %s' % str( target[0] ) )
bundle_dir = str( target[0] )
source_bin = str( source[0] )
macos_dir = os.path.join( bundle_dir, 'Contents/MacOS' )
os.makedirs( macos_dir )
os.chdir( macos_dir )
os.symlink( os.path.join( '../../..', source_bin ), source_bin )
os.chdir( topdir )
resource_path = os.path.join( bundle_dir, 'Contents/Resources' )
os.makedirs( resource_path )
contents_dir = os.path.join( bundle_dir, 'Contents' )
os.chdir( contents_dir )
ogre_bin_dir = os.path.join( env['OGRE_SRC'], 'lib/Release' )
os.symlink( ogre_bin_dir, 'Frameworks' )
os.symlink( ogre_bin_dir, 'Plugins' )
os.chdir( topdir )
except:
os.chdir( topdir )
raise
def AppendOSXBundleBuilder( env ):
if ( system == 'Darwin' ):
b = Builder( action = _DoAssembleBundle )
else:
# dummy builder that does nothing
b = Builder( action = '' )
env.Append( BUILDERS = { 'Bundle' : b } )
return env
|
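A hypothetical `SConstruct` fragment showing how the builder registered above would be driven; `_DoAssembleBundle` reads `env['OGRE_SRC']`, and the target/source names here are illustrative, not from the repo:

```python
from SCons.Script import Environment

# OGRE_SRC is consumed by _DoAssembleBundle when symlinking Frameworks/Plugins.
env = Environment(OGRE_SRC='/path/to/ogre')
env = AppendOSXBundleBuilder(env)          # no-op builder on non-Darwin hosts
env.Bundle('MyApp.app', 'my_app_binary')   # assembles Contents/MacOS etc.
```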
Toure/Iridium | refs/heads/master | iridium/test/validation/sanity.py | 1 |
import pytest
from test import nova_tests
class SanityTest(object):
"""
Sanity test will run through the basic tests and output the results in an
xUnit file with the help of py.test.
:return: xunit test result.
"""
pass
|
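The docstring mentions emitting results as an xUnit file via py.test; a minimal sketch of that invocation (the output file name is illustrative, the test path comes from the row above):

```python
import pytest

# --junitxml writes results in xUnit/JUnit XML format, as the docstring describes.
pytest.main(["--junitxml=sanity_results.xml", "iridium/test/validation/sanity.py"])
```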
robovm/robovm-studio | refs/heads/master | python/testData/resolve/callee/ClassCall.py | 83 |
class A:
def foo(self):
a = A()
A.f<caret>oo(a)
|
nharraud/b2share | refs/heads/master | invenio/ext/session/model.py | 16 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Example implementation of SQLAlchemy session model backend."""
from datetime import datetime
from flask_login import current_user
from invenio.ext.sqlalchemy import db
class Session(db.Model):
"""Represent Session record."""
__tablename__ = 'session'
session_key = db.Column(db.String(32), nullable=False,
server_default='', primary_key=True)
session_expiry = db.Column(db.DateTime, nullable=True, index=True)
session_object = db.Column(db.LargeBinary, nullable=True)
uid = db.Column(db.Integer(15, unsigned=True), nullable=False, index=True)
def get_session(self, name, expired=False):
"""Return an instance of :class:`Session`."""
where = Session.session_key == name
if expired:
where = db.and_(
where, Session.session_expiry >= db.func.current_timestamp())
return self.query.filter(where).one()
def set_session(self, name, value, timeout=None):
"""Store value in database."""
uid = current_user.get_id()
session_expiry = datetime.utcnow() + timeout
return Session(session_key=name,
session_object=value,
session_expiry=session_expiry,
uid=uid)
|
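A minimal sketch of driving the `Session` model above, assuming a configured Flask application context; note that `set_session` adds `timeout` to `datetime.utcnow()`, so it expects a `timedelta`. The key and payload values here are illustrative:

```python
from datetime import timedelta

# Create a session row, persist it, then read it back.
s = Session()
new_session = s.set_session('0123456789abcdef0123456789abcdef',
                            b'pickled session payload',
                            timeout=timedelta(hours=1))
db.session.add(new_session)
db.session.commit()
fetched = s.get_session('0123456789abcdef0123456789abcdef')
```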
stefanw/froide | refs/heads/master | manage.py | 7 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "froide.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
from configurations.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
rhattersley/iris-grib | refs/heads/master | lib/iris/fileformats/grib/message.py | 7 |
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Defines a lightweight wrapper class to wrap a single GRIB message.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from collections import namedtuple
import re
import biggus
import gribapi
import numpy as np
from iris.exceptions import TranslationError
class _OpenFileRef(object):
"""
A reference to an open file that ensures that the file is closed
when the object is garbage collected.
"""
def __init__(self, open_file):
self.open_file = open_file
def __del__(self):
if not self.open_file.closed:
self.open_file.close()
class GribMessage(object):
"""
An in-memory representation of a GribMessage, providing
access to the :meth:`~GribMessage.data` payload and the metadata
elements by section via the :meth:`~GribMessage.sections` property.
"""
@staticmethod
def messages_from_filename(filename):
"""
Return a generator of :class:`GribMessage` instances; one for
each message in the supplied GRIB file.
Args:
* filename (string):
Name of the file to generate fields from.
"""
grib_fh = open(filename, 'rb')
# create an _OpenFileRef to manage the closure of the file handle
file_ref = _OpenFileRef(grib_fh)
while True:
offset = grib_fh.tell()
grib_id = gribapi.grib_new_from_file(grib_fh)
if grib_id is None:
break
raw_message = _RawGribMessage(grib_id)
recreate_raw = _MessageLocation(filename, offset)
yield GribMessage(raw_message, recreate_raw, file_ref=file_ref)
def __init__(self, raw_message, recreate_raw, file_ref=None):
"""
It is recommended to obtain GribMessage instances from the static method
:meth:`~GribMessage.messages_from_filename`, rather than creating
them directly.
"""
# A RawGribMessage giving gribapi access to the original grib message.
self._raw_message = raw_message
# A _MessageLocation which biggus uses to read the message data array,
# by which time this message may be dead and the original grib file
# closed.
self._recreate_raw = recreate_raw
# An _OpenFileRef to keep the grib file open while this GribMessage is
# alive, so that we can always use self._raw_message to fetch keys.
self._file_ref = file_ref
@property
def sections(self):
"""
Return the key-value pairs of the message keys, grouped by containing
section.
Sections in a message are indexed by GRIB section-number,
and values in a section are indexed by key strings.
For example::
print(grib_message.sections[4]['parameterNumber'])
grib_message.sections[1]['minute'] = 0
"""
return self._raw_message.sections
@property
def data(self):
"""
The data array from the GRIB message as a biggus Array.
The shape of the array will match the logical shape of the
message's grid. For example, a simple global grid would be
available as a 2-dimensional array with shape (Nj, Ni).
"""
sections = self.sections
grid_section = sections[3]
if grid_section['sourceOfGridDefinition'] != 0:
raise TranslationError(
'Unsupported source of grid definition: {}'.format(
grid_section['sourceOfGridDefinition']))
reduced = (grid_section['numberOfOctectsForNumberOfPoints'] != 0 or
grid_section['interpretationOfNumberOfPoints'] != 0)
template = grid_section['gridDefinitionTemplateNumber']
if reduced and template not in (40,):
raise TranslationError('Grid definition Section 3 contains '
'unsupported quasi-regular grid.')
if template in (0, 1, 5, 12, 20, 30, 40, 90):
# We can ignore the first two bits (i-neg, j-pos) because
# that is already captured in the coordinate values.
if grid_section['scanningMode'] & 0x3f:
msg = 'Unsupported scanning mode: {}'.format(
grid_section['scanningMode'])
raise TranslationError(msg)
if template in (20, 30, 90):
shape = (grid_section['Ny'], grid_section['Nx'])
elif template == 40 and reduced:
shape = (grid_section['numberOfDataPoints'],)
else:
shape = (grid_section['Nj'], grid_section['Ni'])
proxy = _DataProxy(shape, np.dtype('f8'), np.nan,
self._recreate_raw)
data = biggus.NumpyArrayAdapter(proxy)
else:
fmt = 'Grid definition template {} is not supported'
raise TranslationError(fmt.format(template))
return data
def __getstate__(self):
"""
Alter state of object prior to pickle, ensure open file is closed.
"""
if not self._file_ref.open_file.closed:
self._file_ref.open_file.close()
return self
class _MessageLocation(namedtuple('_MessageLocation', 'filename offset')):
"""A reference to a specific GRIB message within a file."""
__slots__ = ()
def __call__(self):
return _RawGribMessage.from_file_offset(self.filename, self.offset)
class _DataProxy(object):
"""A reference to the data payload of a single GRIB message."""
__slots__ = ('shape', 'dtype', 'fill_value', 'recreate_raw')
def __init__(self, shape, dtype, fill_value, recreate_raw):
self.shape = shape
self.dtype = dtype
self.fill_value = fill_value
self.recreate_raw = recreate_raw
@property
def ndim(self):
return len(self.shape)
def _bitmap(self, bitmap_section):
"""
Get the bitmap for the data from the message. The GRIB spec defines
that the bitmap is composed of values 0 or 1, where:
* 0: no data value at corresponding data point (data point masked).
* 1: data value at corresponding data point (data point unmasked).
The bitmap indicator (bitMapIndicator) can take the following values:
* 0: Bitmap applies to the data and is specified in this section
of this message.
* 1-253: Bitmap applies to the data, is specified by originating
centre and is not specified in section 6 of this message.
* 254: Bitmap applies to the data, is specified in an earlier
section 6 of this message and is not specified in this
section 6 of this message.
* 255: Bitmap does not apply to the data.
Only values 0 and 255 are supported.
Returns the bitmap as a 1D array of length equal to the
number of data points in the message.
"""
# Reference GRIB2 Code Table 6.0.
bitMapIndicator = bitmap_section['bitMapIndicator']
if bitMapIndicator == 0:
bitmap = bitmap_section['bitmap']
elif bitMapIndicator == 255:
bitmap = None
else:
msg = 'Bitmap Section 6 contains unsupported ' \
'bitmap indicator [{}]'.format(bitMapIndicator)
raise TranslationError(msg)
return bitmap
def __getitem__(self, keys):
# NB. Currently assumes that the validity of this interpretation
# is checked before this proxy is created.
message = self.recreate_raw()
sections = message.sections
bitmap_section = sections[6]
bitmap = self._bitmap(bitmap_section)
data = sections[7]['codedValues']
if bitmap is not None:
# Note that bitmap and data are both 1D arrays at this point.
if np.count_nonzero(bitmap) == data.shape[0]:
# Only the non-masked values are included in codedValues.
_data = np.empty(shape=bitmap.shape)
_data[bitmap.astype(bool)] = data
# `np.ma.masked_array` masks where input = 1, the opposite of
# the behaviour specified by the GRIB spec.
data = np.ma.masked_array(_data, mask=np.logical_not(bitmap))
else:
msg = 'Shapes of data and bitmap do not match.'
raise TranslationError(msg)
data = data.reshape(self.shape)
return data.__getitem__(keys)
def __repr__(self):
msg = '<{self.__class__.__name__} shape={self.shape} ' \
'dtype={self.dtype!r} fill_value={self.fill_value!r} ' \
'recreate_raw={self.recreate_raw!r}>'
return msg.format(self=self)
def __getstate__(self):
return {attr: getattr(self, attr) for attr in self.__slots__}
def __setstate__(self, state):
for key, value in six.iteritems(state):
setattr(self, key, value)
class _RawGribMessage(object):
"""
Lightweight GRIB message wrapper, containing **only** the coded keys
of the input GRIB message.
"""
_NEW_SECTION_KEY_MATCHER = re.compile(r'section([0-9]{1})Length')
@staticmethod
def from_file_offset(filename, offset):
with open(filename, 'rb') as f:
f.seek(offset)
message_id = gribapi.grib_new_from_file(f)
if message_id is None:
fmt = 'Invalid GRIB message: {} @ {}'
raise RuntimeError(fmt.format(filename, offset))
return _RawGribMessage(message_id)
def __init__(self, message_id):
"""
A _RawGribMessage object contains the **coded** keys from a
GRIB message that is identified by the input message id.
Args:
* message_id:
An integer generated by gribapi referencing a GRIB message within
an open GRIB file.
"""
self._message_id = message_id
self._sections = None
def __del__(self):
"""
Release the gribapi reference to the message at end of object's life.
"""
gribapi.grib_release(self._message_id)
@property
def sections(self):
"""
Return the key-value pairs of the message keys, grouped by containing
section.
Key-value pairs are collected into a dictionary of
:class:`Section` objects. One such object is made for
each section in the message, such that the section number is the
object's key in the containing dictionary. Each object contains
key-value pairs for all of the message keys in the given section.
"""
if self._sections is None:
self._sections = self._get_message_sections()
return self._sections
def _get_message_keys(self):
"""Creates a generator of all the keys in the message."""
keys_itr = gribapi.grib_keys_iterator_new(self._message_id)
gribapi.grib_skip_computed(keys_itr)
while gribapi.grib_keys_iterator_next(keys_itr):
yield gribapi.grib_keys_iterator_get_name(keys_itr)
gribapi.grib_keys_iterator_delete(keys_itr)
def _get_message_sections(self):
"""
Group keys by section.
Returns a dictionary mapping section number to :class:`Section`
instance.
.. seealso::
The sections property (:meth:`~sections`).
"""
sections = {}
# The first keys in a message are for the whole message and are
# contained in section 0.
section = new_section = 0
section_keys = []
for key_name in self._get_message_keys():
# The `section<1-7>Length` keys mark the start of each new
# section, except for section 8 which is marked by the key '7777'.
key_match = re.match(self._NEW_SECTION_KEY_MATCHER, key_name)
if key_match is not None:
new_section = int(key_match.group(1))
elif key_name == '7777':
new_section = 8
if section != new_section:
sections[section] = Section(self._message_id, section,
section_keys)
section_keys = []
section = new_section
section_keys.append(key_name)
sections[section] = Section(self._message_id, section, section_keys)
return sections
class Section(object):
"""
A Section of a GRIB message, supporting dictionary like access to
attributes using gribapi key strings.
Values for keys may be changed using assignment but this does not
write to the file.
"""
# Keys are read from the file as required and values are cached.
# Within GribMessage instances all keys will have been fetched
def __init__(self, message_id, number, keys):
self._message_id = message_id
self._number = number
self._keys = keys
self._cache = {}
def __repr__(self):
items = []
for key in self._keys:
value = self._cache.get(key, '?')
items.append('{}={}'.format(key, value))
return '<{} {}: {}>'.format(type(self).__name__, self._number,
', '.join(items))
def __getitem__(self, key):
if key not in self._cache:
if key == 'numberOfSection':
value = self._number
elif key not in self._keys:
raise KeyError('{!r} not defined in section {}'.format(
key, self._number))
else:
value = self._get_key_value(key)
self._cache[key] = value
return self._cache[key]
def __setitem__(self, key, value):
# Allow the overwriting of any entry already in the _cache.
if key in self._cache:
self._cache[key] = value
else:
raise KeyError('{!r} cannot be redefined in '
'section {}'.format(key, self._number))
def _get_key_value(self, key):
"""
Get the value associated with the given key in the GRIB message.
Args:
* key:
The GRIB key to retrieve the value of.
Returns the value associated with the requested key in the GRIB
message.
"""
vector_keys = ('codedValues', 'pv', 'satelliteSeries',
'satelliteNumber', 'instrumentType',
'scaleFactorOfCentralWaveNumber',
'scaledValueOfCentralWaveNumber',
'longitudes', 'latitudes')
if key in vector_keys:
res = gribapi.grib_get_array(self._message_id, key)
elif key == 'bitmap':
# The bitmap is stored as contiguous boolean bits, one bit for each
# data point. GRIBAPI returns these as strings, so it must be
# type-cast to return an array of ints (0, 1).
res = gribapi.grib_get_array(self._message_id, key, int)
elif key in ('typeOfFirstFixedSurface', 'typeOfSecondFixedSurface'):
# By default these values are returned as unhelpful strings but
# we can use int representation to compare against instead.
res = gribapi.grib_get(self._message_id, key, int)
else:
res = gribapi.grib_get(self._message_id, key)
return res
def get_computed_key(self, key):
"""
Get the computed value associated with the given key in the GRIB
message.
Args:
* key:
The GRIB key to retrieve the value of.
Returns the value associated with the requested key in the GRIB
message.
"""
vector_keys = ('longitudes', 'latitudes', 'distinctLatitudes')
if key in vector_keys:
res = gribapi.grib_get_array(self._message_id, key)
else:
res = gribapi.grib_get(self._message_id, key)
return res
def keys(self):
"""Return coded keys available in this Section."""
return self._keys
|
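Putting the pieces above together, the documented entry point is `GribMessage.messages_from_filename`; a short sketch (the file name is illustrative, the key access mirrors the `sections` docstring):

```python
# Iterate over messages in a GRIB file and inspect coded keys.
for message in GribMessage.messages_from_filename('sample.grib2'):
    print(message.sections[4]['parameterNumber'])
    print(message.data.shape)  # biggus array; values are read lazily on indexing
```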
abadger/ansible | refs/heads/devel | test/units/template/test_templar.py | 55 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.runtime import Context
from units.compat import unittest
from units.compat.mock import patch
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.template import Templar, AnsibleContext, AnsibleEnvironment, AnsibleUndefined
from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var
from units.mock.loader import DictDataLoader
class BaseTemplar(object):
def setUp(self):
self.test_vars = dict(
foo="bar",
bam="{{foo}}",
num=1,
var_true=True,
var_false=False,
var_dict=dict(a="b"),
bad_dict="{a='b'",
var_list=[1],
recursive="{{recursive}}",
some_var="blip",
some_static_var="static_blip",
some_keyword="{{ foo }}",
some_unsafe_var=wrap_var("unsafe_blip"),
some_static_unsafe_var=wrap_var("static_unsafe_blip"),
some_unsafe_keyword=wrap_var("{{ foo }}"),
str_with_error="{{ 'str' | from_json }}",
)
self.fake_loader = DictDataLoader({
"/path/to/my_file.txt": "foo\n",
})
self.templar = Templar(loader=self.fake_loader, variables=self.test_vars)
self._ansible_context = AnsibleContext(self.templar.environment, {}, {}, {})
def is_unsafe(self, obj):
return self._ansible_context._is_unsafe(obj)
# class used for testing arbitrary objects passed to template
class SomeClass(object):
foo = 'bar'
def __init__(self):
self.blip = 'blip'
class SomeUnsafeClass(AnsibleUnsafe):
def __init__(self):
super(SomeUnsafeClass, self).__init__()
self.blip = 'unsafe blip'
class TestTemplarTemplate(BaseTemplar, unittest.TestCase):
def test_lookup_jinja_dict_key_in_static_vars(self):
res = self.templar.template("{'some_static_var': '{{ some_var }}'}",
static_vars=['some_static_var'])
# self.assertEqual(res['{{ a_keyword }}'], "blip")
print(res)
def test_is_possibly_template_true(self):
tests = [
'{{ foo }}',
'{% foo %}',
'{# foo #}',
'{# {{ foo }} #}',
'{# {{ nothing }} {# #}',
'{# {{ nothing }} {# #} #}',
'{% raw %}{{ foo }}{% endraw %}',
'{{',
'{%',
'{#',
'{% raw',
]
for test in tests:
self.assertTrue(self.templar.is_possibly_template(test))
def test_is_possibly_template_false(self):
tests = [
'{',
'%',
'#',
'foo',
'}}',
'%}',
'raw %}',
'#}',
]
for test in tests:
self.assertFalse(self.templar.is_possibly_template(test))
def test_is_possible_template(self):
"""This test ensures that a broken template still gets templated"""
# Purposefully invalid jinja
self.assertRaises(AnsibleError, self.templar.template, '{{ foo|default(False)) }}')
def test_is_template_true(self):
tests = [
'{{ foo }}',
'{% foo %}',
'{# foo #}',
'{# {{ foo }} #}',
'{# {{ nothing }} {# #}',
'{# {{ nothing }} {# #} #}',
'{% raw %}{{ foo }}{% endraw %}',
]
for test in tests:
self.assertTrue(self.templar.is_template(test))
def test_is_template_false(self):
tests = [
'foo',
'{{ foo',
'{% foo',
'{# foo',
'{{ foo %}',
'{{ foo #}',
'{% foo }}',
'{% foo #}',
'{# foo %}',
'{# foo }}',
'{{ foo {{',
'{% raw %}{% foo %}',
]
for test in tests:
self.assertFalse(self.templar.is_template(test))
def test_is_template_raw_string(self):
res = self.templar.is_template('foo')
self.assertFalse(res)
def test_is_template_none(self):
res = self.templar.is_template(None)
self.assertFalse(res)
def test_template_convert_bare_string(self):
res = self.templar.template('foo', convert_bare=True)
self.assertEqual(res, 'bar')
def test_template_convert_bare_nested(self):
res = self.templar.template('bam', convert_bare=True)
self.assertEqual(res, 'bar')
def test_template_convert_bare_unsafe(self):
res = self.templar.template('some_unsafe_var', convert_bare=True)
self.assertEqual(res, 'unsafe_blip')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_convert_bare_filter(self):
res = self.templar.template('bam|capitalize', convert_bare=True)
self.assertEqual(res, 'Bar')
def test_template_convert_bare_filter_unsafe(self):
res = self.templar.template('some_unsafe_var|capitalize', convert_bare=True)
self.assertEqual(res, 'Unsafe_blip')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_convert_data(self):
res = self.templar.template('{{foo}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
@patch('ansible.template.safe_eval', side_effect=AnsibleError)
def test_template_convert_data_template_in_data(self, mock_safe_eval):
res = self.templar.template('{{bam}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_convert_data_bare(self):
res = self.templar.template('bam', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bam')
def test_template_convert_data_to_json(self):
res = self.templar.template('{{bam|to_json}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, '"bar"')
def test_template_convert_data_convert_bare_data_bare(self):
res = self.templar.template('bam', convert_data=True, convert_bare=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_unsafe_non_string(self):
unsafe_obj = AnsibleUnsafe()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_unsafe_non_string_subclass(self):
unsafe_obj = SomeUnsafeClass()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_weird(self):
data = u'''1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7'''
self.assertRaisesRegexp(AnsibleError,
'template error while templating string',
self.templar.template,
data)
def test_template_with_error(self):
"""Check that AnsibleError is raised, fail if an unhandled exception is raised"""
self.assertRaises(AnsibleError, self.templar.template, "{{ str_with_error }}")
class TestTemplarMisc(BaseTemplar, unittest.TestCase):
def test_templar_simple(self):
templar = self.templar
# test some basic templating
self.assertEqual(templar.template("{{foo}}"), "bar")
self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
self.assertEqual(templar.template("{{bam}}"), "bar")
self.assertEqual(templar.template("{{num}}"), 1)
self.assertEqual(templar.template("{{var_true}}"), True)
self.assertEqual(templar.template("{{var_false}}"), False)
self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
self.assertEqual(templar.template("{{var_list}}"), [1])
self.assertEqual(templar.template(1, convert_bare=True), 1)
# force errors
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
# test with fail_on_undefined=False
self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
# test setting available_variables
templar.available_variables = dict(foo="bam")
self.assertEqual(templar.template("{{foo}}"), "bam")
# variables must be a dict() for available_variables setter
# FIXME Use assertRaises() as a context manager (added in 2.7) once we do not run tests on Python 2.6 anymore.
try:
templar.available_variables = "foo=bam"
except AssertionError:
pass
except Exception as e:
self.fail(e)
def test_templar_escape_backslashes(self):
# Rule of thumb: if escape_backslashes is True you should end up with
# the same number of backslashes as you started with.
self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
def test_template_jinja2_extensions(self):
fake_loader = DictDataLoader({})
templar = Templar(loader=fake_loader)
old_exts = C.DEFAULT_JINJA2_EXTENSIONS
try:
C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
finally:
C.DEFAULT_JINJA2_EXTENSIONS = old_exts
class TestTemplarLookup(BaseTemplar, unittest.TestCase):
def test_lookup_missing_plugin(self):
self.assertRaisesRegexp(AnsibleError,
r'lookup plugin \(not_a_real_lookup_plugin\) not found',
self.templar._lookup,
'not_a_real_lookup_plugin',
'an_arg', a_keyword_arg='a_keyword_arg_value')
def test_lookup_list(self):
res = self.templar._lookup('list', 'an_arg', 'another_arg')
self.assertEqual(res, 'an_arg,another_arg')
def test_lookup_jinja_undefined(self):
self.assertRaisesRegexp(AnsibleUndefinedVariable,
"'an_undefined_jinja_var' is undefined",
self.templar._lookup,
'list', '{{ an_undefined_jinja_var }}')
def test_lookup_jinja_defined(self):
res = self.templar._lookup('list', '{{ some_var }}')
self.assertTrue(self.is_unsafe(res))
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_string_passed(self):
self.assertRaisesRegexp(AnsibleError,
"with_dict expects a dict",
self.templar._lookup,
'dict',
'{{ some_var }}')
def test_lookup_jinja_dict_list_passed(self):
self.assertRaisesRegexp(AnsibleError,
"with_dict expects a dict",
self.templar._lookup,
'dict',
['foo', 'bar'])
def test_lookup_jinja_kwargs(self):
res = self.templar._lookup('list', 'blip', random_keyword='12345')
self.assertTrue(self.is_unsafe(res))
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_list_wantlist(self):
res = self.templar._lookup('list', '{{ some_var }}', wantlist=True)
self.assertEqual(res, ["blip"])
def test_lookup_jinja_list_wantlist_undefined(self):
self.assertRaisesRegexp(AnsibleUndefinedVariable,
"'some_undefined_var' is undefined",
self.templar._lookup,
'list',
'{{ some_undefined_var }}',
wantlist=True)
def test_lookup_jinja_list_wantlist_unsafe(self):
res = self.templar._lookup('list', '{{ some_unsafe_var }}', wantlist=True)
for lookup_result in res:
self.assertTrue(self.is_unsafe(lookup_result))
# self.assertIsInstance(lookup_result, AnsibleUnsafe)
# Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict(self):
res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_var }}'})
self.assertEqual(res['{{ a_keyword }}'], "blip")
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_unsafe(self):
res = self.templar._lookup('list', {'{{ some_unsafe_key }}': '{{ some_unsafe_var }}'})
self.assertTrue(self.is_unsafe(res['{{ some_unsafe_key }}']))
# self.assertIsInstance(res['{{ some_unsafe_key }}'], AnsibleUnsafe)
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_unsafe_value(self):
res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_unsafe_var }}'})
self.assertTrue(self.is_unsafe(res['{{ a_keyword }}']))
# self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_none(self):
res = self.templar._lookup('list', None)
self.assertIsNone(res)
class TestAnsibleContext(BaseTemplar, unittest.TestCase):
def _context(self, variables=None):
variables = variables or {}
env = AnsibleEnvironment()
context = AnsibleContext(env, parent={}, name='some_context',
blocks={})
for key, value in variables.items():
context.vars[key] = value
return context
def test(self):
context = self._context()
self.assertIsInstance(context, AnsibleContext)
self.assertIsInstance(context, Context)
def test_resolve_unsafe(self):
context = self._context(variables={'some_unsafe_key': wrap_var('some_unsafe_string')})
res = context.resolve('some_unsafe_key')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
def test_resolve_unsafe_list(self):
context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
res = context.resolve('some_unsafe_key')
# self.assertIsInstance(res[0], AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
def test_resolve_unsafe_dict(self):
context = self._context(variables={'some_unsafe_key':
{'an_unsafe_dict': wrap_var('some unsafe string 1')}
})
res = context.resolve('some_unsafe_key')
self.assertTrue(self.is_unsafe(res['an_unsafe_dict']),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict'])
def test_resolve(self):
context = self._context(variables={'some_key': 'some_string'})
res = context.resolve('some_key')
self.assertEqual(res, 'some_string')
# self.assertNotIsInstance(res, AnsibleUnsafe)
self.assertFalse(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
def test_resolve_none(self):
context = self._context(variables={'some_key': None})
res = context.resolve('some_key')
self.assertEqual(res, None)
# self.assertNotIsInstance(res, AnsibleUnsafe)
self.assertFalse(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
def test_is_unsafe(self):
context = self._context()
self.assertFalse(context._is_unsafe(AnsibleUndefined()))
|
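A condensed sketch of the `Templar` API these tests exercise, using the same fixtures as `BaseTemplar.setUp` (variable values are illustrative):

```python
from ansible.template import Templar
from units.mock.loader import DictDataLoader

templar = Templar(loader=DictDataLoader({}),
                  variables=dict(foo='bar', bam='{{foo}}'))
print(templar.template('{{ foo }}'))               # -> 'bar'
print(templar.template('bam', convert_bare=True))  # -> 'bar' (nested template)
print(templar.is_template('{{ foo }}'))            # -> True
```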
tgbugs/pyontutils | refs/heads/master | ilxutils/ilxutils/interlex_sql.py | 1 |
from pathlib import Path
import pandas as pd
from sqlalchemy import create_engine, inspect, Table, Column
from collections import defaultdict
from ilxutils.tools import light_degrade, open_pickle, create_pickle
import os
#ELASTIC = 'https://5f86098ac2b28a982cebf64e82db4ea2.us-west-2.aws.found.io:9243/interlex/term/'
TERMS_COMPLETE_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_terms_complete_backup.pickle'
TERMS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_terms_backup.pickle'
ANNOS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_annotations_backup.pickle'
RELAS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_relationships_backup.pickle'
SUPER_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_superclasses_backup.pickle'
SYNOS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_synonyms_backup.pickle'
EXIDS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_ex_backup.pickle'
class IlxSql():
def __init__(self, db_url, pre_load=False, from_backup=False):
self.db_url = db_url
self.engine = create_engine(self.db_url)
self.local_degrade = lambda string: string.lower().strip() # current degrade of choice for sql
self.from_backup = from_backup
self.terms_complete = self.get_terms_complete() if pre_load else pd.DataFrame()
self.terms = self.get_terms() if pre_load else pd.DataFrame()
self.superclasses = self.get_superclasses() if pre_load else pd.DataFrame()
self.annotations = self.get_annotations() if pre_load else pd.DataFrame()
self.existing_ids = self.get_existing_ids() if pre_load else pd.DataFrame()
self.relationships = self.get_relationships() if pre_load else pd.DataFrame()
self.synonyms = self.get_synonyms() if pre_load else pd.DataFrame()
def fetch_terms_complete(self):
if self.terms_complete.empty:
return self.get_terms_complete()
return self.terms_complete
def fetch_terms(self):
if self.terms.empty:
return self.get_terms()
return self.terms
def fetch_annotations(self):
if self.annotations.empty:
return self.get_annotations()
return self.annotations
def fetch_existing_ids(self):
if self.existing_ids.empty:
return self.get_existing_ids()
return self.existing_ids
def fetch_relationships(self):
if self.relationships.empty:
return self.get_relationships()
return self.relationships
def fetch_synonyms(self):
if self.synonyms.empty:
return self.get_synonyms()
return self.synonyms
def fetch_superclasses(self):
if self.superclasses.empty:
return self.get_superclasses()
return self.superclasses
def get_terms(self):
''' GROUP BY is a shortcut for keeping only the first row in each ilx group '''
if not self.terms.empty:
return self.terms
if self.from_backup:
self.terms = open_pickle(TERMS_BACKUP_PATH)
return self.terms
engine = create_engine(self.db_url)
data = """
SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version, t.uid, t.time
FROM terms t
GROUP BY t.ilx
"""
self.terms = pd.read_sql(data, engine)
create_pickle(self.terms, TERMS_BACKUP_PATH)
return self.terms
def get_annotations(self):
if not self.annotations.empty:
    return self.annotations
if self.from_backup:
self.annotations = open_pickle(ANNOS_BACKUP_PATH)
return self.annotations
engine = create_engine(self.db_url)
data = """
SELECT
ta.tid, ta.annotation_tid as annotation_type_tid,
t1.ilx as term_ilx, t2.ilx as annotation_type_ilx,
t2.label as annotation_type_label,
ta.value
FROM term_annotations AS ta
JOIN (
SELECT *
FROM terms
GROUP BY terms.ilx
) AS t1 ON ta.tid=t1.id
JOIN (
SELECT *
FROM terms
GROUP BY terms.ilx
) AS t2 ON ta.annotation_tid=t2.id
"""
self.annotations = pd.read_sql(data, engine)
create_pickle(self.annotations, ANNOS_BACKUP_PATH)
return self.annotations
def get_existing_ids(self):
if not self.existing_ids.empty:
return self.existing_ids
if self.from_backup:
self.existing_ids = open_pickle(EXIDS_BACKUP_PATH)
return self.existing_ids
engine = create_engine(self.db_url)
data = """
SELECT tei.tid, tei.curie, tei.iri, tei.preferred, t.ilx, t.label, t.definition
FROM (
SELECT *
FROM terms
GROUP BY terms.ilx
) as t
JOIN term_existing_ids AS tei
ON t.id = tei.tid
"""
self.existing_ids = pd.read_sql(data, engine)
create_pickle(self.existing_ids, EXIDS_BACKUP_PATH)
return self.existing_ids
def get_relationships(self):
if not self.relationships.empty:
return self.relationships
if self.from_backup:
self.relationships = open_pickle(RELAS_BACKUP_PATH)
return self.relationships
engine = create_engine(self.db_url)
data = """
SELECT
t1.id as term1_tid, t1.ilx AS term1_ilx, t1.type as term1_type,
t2.id as term2_tid, t2.ilx AS term2_ilx, t2.type as term2_type,
t3.id as relationship_tid, t3.ilx AS relationship_ilx, t3.label as relationship_label
FROM term_relationships AS tr
JOIN (
SELECT *
FROM terms
GROUP BY terms.ilx
) t1 ON t1.id = tr.term1_id
JOIN (
SELECT *
FROM terms
GROUP BY terms.ilx
) AS t2 ON t2.id = tr.term2_id
JOIN (
SELECT *
FROM terms
GROUP BY terms.ilx
) AS t3 ON t3.id = tr.relationship_tid
"""
self.relationships = pd.read_sql(data, engine)
create_pickle(self.relationships, RELAS_BACKUP_PATH)
return self.relationships
def get_superclasses(self):
if not self.superclasses.empty:
return self.superclasses
if self.from_backup:
self.superclasses = open_pickle(SUPER_BACKUP_PATH)
return self.superclasses
engine = create_engine(self.db_url)
data = """
SELECT
ts.tid, ts.superclass_tid,
t1.label as term_label, t1.ilx as term_ilx,
t2.label as superclass_label, t2.ilx as superclass_ilx
FROM term_superclasses AS ts
JOIN (
SELECT *
FROM terms
GROUP BY terms.ilx
) as t1
ON t1.id = ts.tid
JOIN (
SELECT *
FROM terms
GROUP BY terms.ilx
) AS t2
ON t2.id = ts.superclass_tid
"""
self.superclasses = pd.read_sql(data, engine)
create_pickle(self.superclasses, SUPER_BACKUP_PATH)
return self.superclasses
def get_synonyms(self):
if not self.synonyms.empty:
return self.synonyms
if self.from_backup:
self.synonyms = open_pickle(SYNOS_BACKUP_PATH)
return self.synonyms
engine = create_engine(self.db_url)
data = """
SELECT ts.tid as tid, t.ilx, ts.literal, ts.type
FROM term_synonyms AS ts
JOIN (
SELECT *
FROM terms
GROUP BY terms.ilx
) AS t
ON ts.tid = t.id
"""
self.synonyms = pd.read_sql(data, engine)
create_pickle(self.synonyms, SYNOS_BACKUP_PATH)
return self.synonyms
def get_terms_complete(self) -> pd.DataFrame:
''' Gets complete entity data like term/view '''
if not self.terms_complete.empty:
return self.terms_complete
if self.from_backup:
self.terms_complete = open_pickle(TERMS_COMPLETE_BACKUP_PATH)
return self.terms_complete
ilx2synonyms = self.get_ilx2synonyms()
ilx2existing_ids = self.get_ilx2existing_ids()
ilx2annotations = self.get_ilx2annotations()
ilx2superclass = self.get_ilx2superclass()
ilx_complete = []
header = ['Index'] + list(self.fetch_terms().columns)
for row in self.fetch_terms().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
row['synonyms'] = ilx2synonyms.get(row['ilx'])
row['existing_ids'] = ilx2existing_ids[row['ilx']]  # if this breaks we have worse problems
row['annotations'] = ilx2annotations.get(row['ilx'])
row['superclass'] = ilx2superclass.get(row['ilx'])
ilx_complete.append(row)
terms_complete = pd.DataFrame(ilx_complete)
create_pickle(terms_complete, TERMS_COMPLETE_BACKUP_PATH)
return terms_complete
def get_label2id(self):
self.terms = self.fetch_terms()
visited = {}
label_to_id = defaultdict(lambda: defaultdict(list))
for row in self.terms.itertuples():
label = self.local_degrade(row.label)
if not visited.get((label, row.type, row.ilx)):
if row.type == 'term':
label_to_id[label]['term'].append(int(row.id))
visited[(label, row.type, row.ilx)] = True
elif row.type == 'cde':
label_to_id[label]['cde'].append(int(row.id))
visited[(label, row.type, row.ilx)] = True
elif row.type == 'fde':
label_to_id[label]['fde'].append(int(row.id))
visited[(label, row.type, row.ilx)] = True
return label_to_id
def get_label2ilxs(self):
self.terms = self.fetch_terms()
visited = {}
label_to_ilx = defaultdict(list)
for row in self.terms.itertuples():
label = self.local_degrade(row.label)
if not visited.get((label, row.type, row.ilx)):
label_to_ilx[label].append(str(row.ilx))
visited[(label, row.type, row.ilx)] = True
return label_to_ilx
def get_label2rows(self):
self.terms_complete = self.fetch_terms_complete()
visited = {}
label2rows = defaultdict(list)
header = ['Index'] + list(self.terms_complete.columns)
for row in self.terms_complete.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
label = self.local_degrade(row['label'])
if not visited.get((label, row['type'], row['ilx'])):
label2rows[label].append(row)
visited[(label, row['type'], row['ilx'])] = True
return label2rows
def get_definition2rows(self):
self.terms = self.fetch_terms()
visited = {}
definition2rows = defaultdict(list)
header = ['Index'] + list(self.terms.columns)
for row in self.terms.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
definition = self.local_degrade(row['definition'])
if not definition or definition == ' ':
continue
if not visited.get((definition, row['type'], row['ilx'])):
definition2rows[definition].append(row)
visited[(definition, row['type'], row['ilx'])] = True
return definition2rows
def get_tid2row(self):
tid2row = {}
header = ['Index'] + list(self.fetch_terms().columns)
for row in self.fetch_terms().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
tid2row[row['tid']] = row
return tid2row
def get_ilx2row(self):
ilx2row = {}
header = ['Index'] + list(self.fetch_terms().columns)
for row in self.fetch_terms().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
ilx2row[row['ilx']] = row
return ilx2row
def get_ilx2superclass(self, clean:bool=True):
''' clean: for list of literals only '''
ilx2superclass = defaultdict(list)
header = ['Index'] + list(self.fetch_superclasses().columns)
for row in self.fetch_superclasses().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
if clean:
superclass = {
'tid': row['superclass_tid'],
'ilx': row['superclass_ilx'],
}
ilx2superclass[row['term_ilx']].append(superclass)
elif not clean:
ilx2superclass[row['term_ilx']].append(row)
return ilx2superclass
def get_tid2annotations(self, clean:bool=True):
''' clean: for list of literals only '''
tid2annotations = defaultdict(list)
header = ['Index'] + list(self.fetch_annotations().columns)
for row in self.fetch_annotations().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
if clean:
annotation = {
'tid': row['tid'],
'annotation_type_tid': row['annotation_type_tid'],
'value': row['value'],
'annotation_type_label': row['annotation_type_label'],
}
tid2annotations[row['tid']].append(annotation)
elif not clean:
tid2annotations[row['tid']].append(row)
return tid2annotations
def get_ilx2annotations(self, clean:bool=True):
''' clean: for list of literals only '''
ilx2annotations = defaultdict(list)
header = ['Index'] + list(self.fetch_annotations().columns)
for row in self.fetch_annotations().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
if clean:
annotation = {
'tid': row['tid'],
'annotation_type_tid': row['annotation_type_tid'],
'value': row['value'],
'annotation_type_label': row['annotation_type_label'],
}
ilx2annotations[row['term_ilx']].append(annotation)
elif not clean:
ilx2annotations[row['term_ilx']].append(row)
return ilx2annotations
def get_tid2synonyms(self, clean:bool=True):
''' clean: for list of literals only '''
tid2synonyms = defaultdict(list)
header = ['Index'] + list(self.fetch_synonyms().columns)
for row in self.fetch_synonyms().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
if clean:
synonym = {'literal':row['literal'], 'type':row['type']}
tid2synonyms[row['tid']].append(synonym)
elif not clean:
tid2synonyms[row['tid']].append(row)
return tid2synonyms
def get_ilx2synonyms(self, clean:bool=True):
''' clean: for list of literals only '''
ilx2synonyms = defaultdict(list)
header = ['Index'] + list(self.fetch_synonyms().columns)
for row in self.fetch_synonyms().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
if clean:
synonym = {'literal':row['literal'], 'type':row['type']}
ilx2synonyms[row['ilx']].append(synonym)
elif not clean:
ilx2synonyms[row['ilx']].append(row)
return ilx2synonyms
def get_iri2row(self):
iri2row = {}
header = ['Index'] + list(self.fetch_existing_ids().columns)
for row in self.fetch_existing_ids().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
iri2row[row['iri']] = row
return iri2row
def get_tid2existing_ids(self, clean=True):
tid2existing_ids = defaultdict(list)
header = ['Index'] + list(self.fetch_existing_ids().columns)
for row in self.fetch_existing_ids().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
if clean:
existing_id = {'iri':row['iri'], 'curie':row['curie']}
tid2existing_ids[row['tid']].append(existing_id)
elif not clean:
tid2existing_ids[row['tid']].append(row)
return tid2existing_ids
def get_ilx2existing_ids(self, clean=True):
ilx2existing_ids = defaultdict(list)
header = ['Index'] + list(self.fetch_existing_ids().columns)
for row in self.fetch_existing_ids().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
if clean:
existing_id = {'iri':row['iri'], 'curie':row['curie']}
ilx2existing_ids[row['ilx']].append(existing_id)
elif not clean:
ilx2existing_ids[row['ilx']].append(row)
return ilx2existing_ids
def get_curie2row(self):
curie2row = {}
header = ['Index'] + list(self.fetch_existing_ids().columns)
for row in self.fetch_existing_ids().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
curie2row[row['curie']] = row
return curie2row
def get_fragment2rows(self):
    fragment2rows = defaultdict(list)
    header = ['Index'] + list(self.fetch_existing_ids().columns)
    for row in self.fetch_existing_ids().itertuples():
        row = {header[i]: val for i, val in enumerate(row)}
        if not row['curie']:  # a few rows have no curie and would cause a false positive
            continue
        fragment = row['curie'].split(':')[-1]
        fragment2rows[fragment].append(row)
    return fragment2rows
def show_tables(self):
data = "SHOW tables;"
return pd.read_sql(data, self.engine)
def get_table(self, tablename, limit=5):
data = """
SELECT *
FROM {tablename}
LIMIT {limit}
""".format(tablename=tablename, limit=limit)
return pd.read_sql(data, self.engine)
def get_custom(self, data):
return pd.read_sql(data, self.engine)
def main():
db_url = os.environ.get('SCICRUNCH_DB_URL_PRODUCTION')
sql = IlxSql(db_url)
rels = sql.get_relationships()
print(rels.head())
if __name__ == '__main__':
main()
|
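Mirroring `main()` above, a usage sketch for `IlxSql`; only the environment variable name comes from the file, the rest is illustrative:

```python
import os

sql = IlxSql(db_url=os.environ.get('SCICRUNCH_DB_URL_PRODUCTION'))
terms = sql.fetch_terms()          # cached pandas DataFrame of terms
label2rows = sql.get_label2rows()  # degraded label -> list of candidate rows
print(terms.head())
```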
dguerri/ansible-modules-core | refs/heads/devel | cloud/rackspace/rax_files_objects.py | 99 |
#!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for file object operations.
required: true
default: null
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; i.e. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
default: null
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
default: null
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
default: null
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
choices:
- yes
- "no"
default: "yes"
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects: container=testcont dest=~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects: container=testcont src=file1 dest=~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects: container=testcont src=file1,file2,file3 dest=~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects: container=testcont method=delete dest=file1
- name: "Delete several objects in test container"
rax_files_objects: container=testcont method=delete dest=file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects: container=testcont method=delete
- name: "Upload all files to test container"
rax_files_objects: container=testcont method=put src=~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: someuser@example.com
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file3 expires=60
- name: "Attempt to get remote object that does not exist"
rax_files_objects: container=testcont method=get src=FileThatDoesNotExist.jpg dest=~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects: container=testcont method=delete dest=FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects: container=testcont type=meta dest=file2
- name: "Get metadata on several objects"
rax_files_objects: container=testcont type=meta src=file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects: container=testcont type=meta src=file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects: container=testcont type=meta
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer, e:
module.fail_json(msg=e.message)
def upload(module, cf, container, src, dest, meta, expires):
""" Uploads a single object or a folder to Cloud Files Optionally sets an
metadata, TTL value (expires), or Content-Disposition and Content-Encoding
headers.
"""
c = _get_container(module, cf, container)
num_objs_before = len(c.get_object_names())
if not src:
module.fail_json(msg='src must be specified when uploading')
src = os.path.abspath(os.path.expanduser(src))
is_dir = os.path.isdir(src)
if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
module.fail_json(msg='src must be a file or a directory')
if dest and is_dir:
module.fail_json(msg='dest cannot be set when whole '
'directories are uploaded')
cont_obj = None
if dest and not is_dir:
try:
cont_obj = c.upload_file(src, obj_name=dest, ttl=expires)
except Exception, e:
module.fail_json(msg=e.message)
elif is_dir:
try:
id, total_bytes = cf.upload_folder(src, container=c.name, ttl=expires)
except Exception, e:
module.fail_json(msg=e.message)
while True:
bytes = cf.get_uploaded(id)
if bytes == total_bytes:
break
time.sleep(1)
else:
try:
cont_obj = c.upload_file(src, ttl=expires)
except Exception, e:
module.fail_json(msg=e.message)
num_objs_after = len(c.get_object_names())
if not meta:
meta = dict()
meta_result = dict()
if meta:
if cont_obj:
meta_result = cont_obj.set_metadata(meta)
else:
def _set_meta(objs, meta):
""" Sets metadata on a list of objects specified by name """
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta)
except Exception, e:
module.fail_json(msg=e.message)
else:
meta_result[obj] = result
return meta_result
def _walker(objs, path, filenames):
""" Callback func for os.path.walk """
prefix = ''
if path != src:
prefix = path.split(src)[-1].lstrip('/')
filenames = [os.path.join(prefix, name) for name in filenames
if not os.path.isdir(name)]
objs += filenames
_objs = []
os.path.walk(src, _walker, _objs)
meta_result = _set_meta(_objs, meta)
EXIT_DICT['success'] = True
EXIT_DICT['container'] = c.name
EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
if cont_obj or locals().get('bytes'):
EXIT_DICT['changed'] = True
if meta_result:
EXIT_DICT['meta'] = dict(updated=True)
if cont_obj:
EXIT_DICT['bytes'] = cont_obj.total_bytes
EXIT_DICT['etag'] = cont_obj.etag
else:
EXIT_DICT['bytes'] = total_bytes
module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
""" Download objects from Cloud Files to a local path specified by "dest".
    Optionally disable maintaining a directory structure by passing a
false value to "structure".
"""
# Looking for an explicit destination
if not dest:
module.fail_json(msg='dest is a required argument when '
'downloading from Cloud Files')
# Attempt to fetch the container by name
c = _get_container(module, cf, container)
# Accept a single object name or a comma-separated list of objs
# If not specified, get the entire container
if src:
objs = src.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
dest = os.path.abspath(os.path.expanduser(dest))
is_dir = os.path.isdir(dest)
if not is_dir:
module.fail_json(msg='dest must be a directory')
results = []
for obj in objs:
try:
c.download_object(obj, dest, structure=structure)
except Exception, e:
module.fail_json(msg=e.message)
else:
results.append(obj)
len_results = len(results)
len_objs = len(objs)
EXIT_DICT['container'] = c.name
EXIT_DICT['requested_downloaded'] = results
if results:
EXIT_DICT['changed'] = True
if len_results == len_objs:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
else:
EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
"downloaded" % (len_results, len_objs)
module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
""" Delete specific objects by proving a single file name or a
comma-separated list to src OR dest (but not both). Omitting file name(s)
assumes the entire container is to be deleted.
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
c = _get_container(module, cf, container)
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
num_objs = len(objs)
results = []
for obj in objs:
try:
result = c.delete_object(obj)
except Exception, e:
module.fail_json(msg=e.message)
else:
results.append(result)
num_deleted = results.count(True)
EXIT_DICT['container'] = c.name
EXIT_DICT['deleted'] = num_deleted
EXIT_DICT['requested_deleted'] = objs
if num_deleted:
EXIT_DICT['changed'] = True
if num_objs == num_deleted:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
else:
EXIT_DICT['msg'] = ("Error: only %s of %s objects "
"deleted" % (num_deleted, num_objs))
module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
""" Get metadata for a single file, comma-separated list, or entire
container
"""
c = _get_container(module, cf, container)
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
results = dict()
for obj in objs:
try:
meta = c.get_object(obj).get_metadata()
except Exception, e:
module.fail_json(msg=e.message)
else:
results[obj] = dict()
for k, v in meta.items():
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
EXIT_DICT['success'] = True
module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
""" Set metadata on a container, single file, or comma-separated list.
Passing a true value to clear_meta clears the metadata stored in Cloud
Files before setting the new metadata to the value of "meta".
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to set meta"
" have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = []
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
except Exception, e:
module.fail_json(msg=e.message)
else:
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
        EXIT_DICT['num_changed'] = len(results)
module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
""" Removes metadata keys and values specified in meta, if any. Deletes on
all objects specified by src or dest (but not both), if any; otherwise it
deletes keys on all objects in the container
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
"deleted have been specified on both src and dest"
" args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = [] # Num of metadata keys removed, not objects affected
for obj in objs:
if meta:
for k, v in meta.items():
try:
result = c.get_object(obj).remove_metadata_key(k)
except Exception, e:
module.fail_json(msg=e.message)
else:
results.append(result)
else:
try:
o = c.get_object(obj)
except pyrax.exc.NoSuchObject, e:
module.fail_json(msg=e.message)
for k, v in o.get_metadata().items():
try:
result = o.remove_metadata_key(k)
except Exception, e:
module.fail_json(msg=e.message)
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_deleted'] = len(results)
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "file":
if method == 'put':
upload(module, cf, container, src, dest, meta, expires)
elif method == 'get':
download(module, cf, container, src, dest, structure)
elif method == 'delete':
delete(module, cf, container, src, dest)
else:
if method == 'get':
get_meta(module, cf, container, src, dest)
if method == 'put':
put_meta(module, cf, container, src, dest, meta, clear_meta)
if method == 'delete':
delete_meta(module, cf, container, src, dest, meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(required=True),
src=dict(),
dest=dict(),
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
method = module.params.get('method')
typ = module.params.get('type')
meta = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
structure = module.params.get('structure')
expires = module.params.get('expires')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
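# --- Hedged illustration (not part of the original module) ------------------
# A minimal sketch of what method=put does with pyrax directly; it assumes
# pyrax credentials are already configured and that the container exists.
# The helper name is illustrative only and is never called here.
def _pyrax_put_sketch(path, container, ttl=None, meta=None):
    cf = pyrax.cloudfiles
    cont = cf.get_container(container)      # same lookup _get_container performs
    obj = cont.upload_file(path, ttl=ttl)   # same call upload() relies on
    if meta:
        obj.set_metadata(meta)              # same call used for the meta option
    return obj.etag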
main()
|
mnahm5/django-estore
|
refs/heads/master
|
Lib/site-packages/cffi/verifier.py
|
56
|
#
# DEPRECATED: implementation for ffi.verify()
#
import sys, os, binascii, shutil, io
from . import __version_verifier_modules__
from . import ffiplatform
if sys.version_info >= (3, 3):
import importlib.machinery
def _extension_suffixes():
return importlib.machinery.EXTENSION_SUFFIXES[:]
else:
import imp
def _extension_suffixes():
return [suffix for suffix, _, type in imp.get_suffixes()
if type == imp.C_EXTENSION]
if sys.version_info >= (3,):
NativeIO = io.StringIO
else:
class NativeIO(io.BytesIO):
def write(self, s):
if isinstance(s, unicode):
s = s.encode('ascii')
super(NativeIO, self).write(s)
def _hack_at_distutils():
# Windows-only workaround for some configurations: see
# https://bugs.python.org/issue23246 (Python 2.7 with
# a specific MS compiler suite download)
if sys.platform == "win32":
try:
import setuptools # for side-effects, patches distutils
except ImportError:
pass
class Verifier(object):
def __init__(self, ffi, preamble, tmpdir=None, modulename=None,
ext_package=None, tag='', force_generic_engine=False,
source_extension='.c', flags=None, relative_to=None, **kwds):
if ffi._parser._uses_new_feature:
raise ffiplatform.VerificationError(
"feature not supported with ffi.verify(), but only "
"with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,))
self.ffi = ffi
self.preamble = preamble
if not modulename:
flattened_kwds = ffiplatform.flatten(kwds)
vengine_class = _locate_engine_class(ffi, force_generic_engine)
self._vengine = vengine_class(self)
self._vengine.patch_extension_kwds(kwds)
self.flags = flags
self.kwds = self.make_relative_to(kwds, relative_to)
#
if modulename:
if tag:
raise TypeError("can't specify both 'modulename' and 'tag'")
else:
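            # Build a key from everything that affects the generated C source
            # (Python version, verifier version, preamble, flattened kwargs,
            # cdef sources) and derive a stable module name from two CRC32
            # halves of it.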
key = '\x00'.join([sys.version[:3], __version_verifier_modules__,
preamble, flattened_kwds] +
ffi._cdefsources)
if sys.version_info >= (3,):
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key,
k1, k2)
suffix = _get_so_suffixes()[0]
self.tmpdir = tmpdir or _caller_dir_pycache()
self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension)
self.modulefilename = os.path.join(self.tmpdir, modulename + suffix)
self.ext_package = ext_package
self._has_source = False
self._has_module = False
def write_source(self, file=None):
"""Write the C source code. It is produced in 'self.sourcefilename',
which can be tweaked beforehand."""
with self.ffi._lock:
if self._has_source and file is None:
raise ffiplatform.VerificationError(
"source code already written")
self._write_source(file)
def compile_module(self):
"""Write the C source code (if not done already) and compile it.
This produces a dynamic link library in 'self.modulefilename'."""
with self.ffi._lock:
if self._has_module:
raise ffiplatform.VerificationError("module already compiled")
if not self._has_source:
self._write_source()
self._compile_module()
def load_library(self):
"""Get a C module from this Verifier instance.
Returns an instance of a FFILibrary class that behaves like the
objects returned by ffi.dlopen(), but that delegates all
operations to the C module. If necessary, the C code is written
and compiled first.
"""
with self.ffi._lock:
if not self._has_module:
self._locate_module()
if not self._has_module:
if not self._has_source:
self._write_source()
self._compile_module()
return self._load_library()
def get_module_name(self):
basename = os.path.basename(self.modulefilename)
# kill both the .so extension and the other .'s, as introduced
# by Python 3: 'basename.cpython-33m.so'
basename = basename.split('.', 1)[0]
# and the _d added in Python 2 debug builds --- but try to be
# conservative and not kill a legitimate _d
if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'):
basename = basename[:-2]
return basename
def get_extension(self):
_hack_at_distutils() # backward compatibility hack
if not self._has_source:
with self.ffi._lock:
if not self._has_source:
self._write_source()
sourcename = ffiplatform.maybe_relative_path(self.sourcefilename)
modname = self.get_module_name()
return ffiplatform.get_extension(sourcename, modname, **self.kwds)
def generates_python_module(self):
return self._vengine._gen_python_module
def make_relative_to(self, kwds, relative_to):
if relative_to and os.path.dirname(relative_to):
dirname = os.path.dirname(relative_to)
kwds = kwds.copy()
for key in ffiplatform.LIST_OF_FILE_NAMES:
if key in kwds:
lst = kwds[key]
if not isinstance(lst, (list, tuple)):
raise TypeError("keyword '%s' should be a list or tuple"
% (key,))
lst = [os.path.join(dirname, fn) for fn in lst]
kwds[key] = lst
return kwds
# ----------
def _locate_module(self):
if not os.path.isfile(self.modulefilename):
if self.ext_package:
try:
pkg = __import__(self.ext_package, None, None, ['__doc__'])
except ImportError:
return # cannot import the package itself, give up
# (e.g. it might be called differently before installation)
path = pkg.__path__
else:
path = None
filename = self._vengine.find_module(self.get_module_name(), path,
_get_so_suffixes())
if filename is None:
return
self.modulefilename = filename
self._vengine.collect_types()
self._has_module = True
def _write_source_to(self, file):
self._vengine._f = file
try:
self._vengine.write_source_to_f()
finally:
del self._vengine._f
def _write_source(self, file=None):
if file is not None:
self._write_source_to(file)
else:
            # Write our source file to an in-memory buffer first.
f = NativeIO()
self._write_source_to(f)
source_data = f.getvalue()
# Determine if this matches the current file
if os.path.exists(self.sourcefilename):
with open(self.sourcefilename, "r") as fp:
needs_written = not (fp.read() == source_data)
else:
needs_written = True
# Actually write the file out if it doesn't match
if needs_written:
_ensure_dir(self.sourcefilename)
with open(self.sourcefilename, "w") as fp:
fp.write(source_data)
# Set this flag
self._has_source = True
def _compile_module(self):
# compile this C source
tmpdir = os.path.dirname(self.sourcefilename)
outputfilename = ffiplatform.compile(tmpdir, self.get_extension())
try:
same = ffiplatform.samefile(outputfilename, self.modulefilename)
except OSError:
same = False
if not same:
_ensure_dir(self.modulefilename)
shutil.move(outputfilename, self.modulefilename)
self._has_module = True
def _load_library(self):
assert self._has_module
if self.flags is not None:
return self._vengine.load_library(self.flags)
else:
return self._vengine.load_library()
# ____________________________________________________________
_FORCE_GENERIC_ENGINE = False # for tests
def _locate_engine_class(ffi, force_generic_engine):
if _FORCE_GENERIC_ENGINE:
force_generic_engine = True
if not force_generic_engine:
if '__pypy__' in sys.builtin_module_names:
force_generic_engine = True
else:
try:
import _cffi_backend
except ImportError:
_cffi_backend = '?'
if ffi._backend is not _cffi_backend:
force_generic_engine = True
if force_generic_engine:
from . import vengine_gen
return vengine_gen.VGenericEngine
else:
from . import vengine_cpy
return vengine_cpy.VCPythonEngine
# ____________________________________________________________
_TMPDIR = None
def _caller_dir_pycache():
if _TMPDIR:
return _TMPDIR
result = os.environ.get('CFFI_TMPDIR')
if result:
return result
filename = sys._getframe(2).f_code.co_filename
return os.path.abspath(os.path.join(os.path.dirname(filename),
'__pycache__'))
def set_tmpdir(dirname):
"""Set the temporary directory to use instead of __pycache__."""
global _TMPDIR
_TMPDIR = dirname
def cleanup_tmpdir(tmpdir=None, keep_so=False):
"""Clean up the temporary directory by removing all files in it
called `_cffi_*.{c,so}` as well as the `build` subdirectory."""
tmpdir = tmpdir or _caller_dir_pycache()
try:
filelist = os.listdir(tmpdir)
except OSError:
return
if keep_so:
suffix = '.c' # only remove .c files
else:
suffix = _get_so_suffixes()[0].lower()
for fn in filelist:
if fn.lower().startswith('_cffi_') and (
fn.lower().endswith(suffix) or fn.lower().endswith('.c')):
try:
os.unlink(os.path.join(tmpdir, fn))
except OSError:
pass
clean_dir = [os.path.join(tmpdir, 'build')]
for dir in clean_dir:
try:
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if os.path.isdir(fn):
clean_dir.append(fn)
else:
os.unlink(fn)
except OSError:
pass
def _get_so_suffixes():
suffixes = _extension_suffixes()
if not suffixes:
# bah, no C_EXTENSION available. Occurs on pypy without cpyext
if sys.platform == 'win32':
suffixes = [".pyd"]
else:
suffixes = [".so"]
return suffixes
def _ensure_dir(filename):
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
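# ----------
# Hedged usage sketch (not part of this module): the deprecated ffi.verify()
# entry point that Verifier backs.  It needs a C compiler at run time, so it
# is defined here but never invoked.
def _verify_usage_sketch():
    from cffi import FFI
    ffi = FFI()
    ffi.cdef("double sqrt(double x);")
    lib = ffi.verify("#include <math.h>", libraries=["m"])
    return lib.sqrt(9.0)   # -> 3.0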
|
Alexpux/MSYS2-pacman
|
refs/heads/master
|
test/pacman/tests/remove031.py
|
23
|
self.description = "Remove a package in HoldPkg"
p1 = pmpkg("foopkg")
self.addpkg2db("local", p1)
self.option["HoldPkg"] = ["???pkg"]
self.args = "-R %s" % p1.name
self.addrule("PACMAN_RETCODE=1")
self.addrule("PKG_EXIST=foopkg")
|
emilroz/openmicroscopy
|
refs/heads/develop
|
components/tools/OmeroPy/src/omero/util/ROI_utils.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#------------------------------------------------------------------------------
# Copyright (C) 2006-2009 University of Dundee. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------------
###
#
# ROIUtils allows the mapping of omero.model.ROIDataTypesI to Python types
# and the creation of ROIDataTypesI from ROIUtils types.
# These classes also implement the acceptVisitor method, linking to the ROIDrawingCanvas.
#
#
# @author Jean-Marie Burel
# <a href="mailto:j.burel@dundee.ac.uk">j.burel@dundee.ac.uk</a>
# @author Donald MacDonald
# <a href="mailto:donald@lifesci.dundee.ac.uk">donald@lifesci.dundee.ac.uk</a>
# @version 3.0
# <small>
# (<b>Internal version:</b> $Revision: $Date: $)
# </small>
# @since 3.0-Beta4
#/
import omero.clients
from omero.model import RoiI
from omero.model import EllipseI
from omero.model import LineI
from omero.model import RectI
from omero.model import PointI
from omero.model import PolylineI
from omero.model import PolygonI
from omero.model import PathI
from omero.model import MaskI
from omero.rtypes import rdouble
from omero.rtypes import rstring
from omero.rtypes import rint
from omero.rtypes import rfloat
##
# Marks the calling method as abstract; raises NotImplementedError naming the caller.
#
#
def abstract():
import inspect
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError(caller + ' must be implemented in subclass')
##
# ShapeSettingsData contains all the display information about the ROI that aggregates it.
#
class ShapeSettingsData:
##
# Initialises the default values of the ShapeSettings.
    # Stroke has a default colour of grey
# StrokeWidth defaults to 1
#
def __init__(self):
self.WHITE = 16777215
self.BLACK = 0
self.GREY = 11184810
self.strokeColour = rint(self.GREY)
self.strokeWidth = rint(1)
self.strokeDashArray = rstring('')
self.strokeDashOffset = rint(0)
self.strokeLineCap = rstring('')
self.strokeLineJoin = rstring('')
self.strokeMiterLimit = rint(0)
self.fillColour = rint(self.GREY)
self.fillRule = rstring('')
##
# Applies the settings in the ShapeSettingsData to the ROITypeI
# @param shape the omero.model.ROITypeI that these settings will be applied to
#
def setROIShapeSettings(self, shape):
shape.setStrokeColor(self.strokeColour);
shape.setStrokeWidth(self.strokeWidth);
shape.setStrokeDashArray(self.strokeDashArray);
shape.setStrokeDashOffset(self.strokeDashOffset);
shape.setStrokeLineCap(self.strokeLineCap);
shape.setStrokeLineJoin(self.strokeLineJoin);
shape.setStrokeMiterLimit(self.strokeMiterLimit);
shape.setFillColor(self.fillColour);
shape.setFillRule(self.fillRule);
##
# Set the Stroke settings of the ShapeSettings.
# @param colour The colour of the stroke.
# @param width The stroke width.
#
def setStrokeSettings(self, colour, width = 1):
self.strokeColour = rint(colour);
self.strokeWidth = rint(width);
###
# Set the Fill Settings for the ShapeSettings.
# @param colour The fill colour of the shape.
def setFillSettings(self, colour):
        self.fillColour = rint(colour);
##
# Get the stroke settings as the tuple (strokeColour, strokeWidth).
# @return See above.
#
def getStrokeSettings(self):
return (self.strokeColour.getValue(), self.strokeWidth.getValue());
##
# Get the fill setting as a tuple of (fillColour)
# @return See above.
#
def getFillSettings(self):
return (self.fillColour.getValue());
##
    # Get the tuple ((strokeColour, strokeWidth), (fillColour)).
# @return see above.
#
def getSettings(self):
return (self.getStrokeSettings(), self.getFillSettings());
##
# Set the current shapeSettings from the ROI roi.
# @param roi see above.
#
def getShapeSettingsFromROI(self, roi):
self.strokeColour = roi.getStrokeColor();
self.strokeWidth = roi.getStrokeWidth();
self.strokeDashArray = roi.getStrokeDashArray();
self.strokeDashOffset = roi.getStrokeDashOffset();
self.strokeLineCap = roi.getStrokeLineCap();
self.strokeLineJoin = roi.getStrokeLineJoin();
self.strokeMiterLimit = roi.getStrokeMiterLimit();
self.fillColour = roi.getFillColor();
self.fillRule = roi.getFillRule();
##
# This class stores the ROI Coordinate (Z,T).
#
class ROICoordinate:
##
# Initialise the ROICoordinate.
# @param z The z-section.
# @param t The timepoint.
def __init__(self, z = 0, t = 0):
self.theZ = rint(z);
self.theT = rint(t);
##
# Set the (z, t) for the roi using the (z, t) of the ROICoordinate.
# @param roi The ROI to set the (z, t) on.
#
def setROICoord(self, roi):
roi.setTheZ(self.theZ);
roi.setTheT(self.theT);
##
# Get the (z, t) from the ROI.
    # @param roi See above.
#
def setCoordFromROI(self, roi):
self.theZ = roi.getTheZ();
self.theT = roi.getTheT();
##
# Interface to inherit for accepting ROIDrawing as a visitor.
# @param visitor The ROIDrawingCompoent.
#
class ROIDrawingI:
def acceptVisitor(self, visitor):
abstract();
##
# The base class for all ROIShapeData objects.
#
class ShapeData:
##
# Constructor sets up the coord, shapeSettings and ROI objects.
#
def __init__(self):
self.coord = ROICoordinate();
self.shapeSettings = ShapeSettingsData();
        self.roi = None;
##
# Set the coord of the class to coord.
# @param See above.
#
def setCoord(self, coord):
self.coord = coord;
##
# Set the ROICoordinate of the roi.
# @param roi See above.
#
def setROICoord(self, roi):
self.coord.setROICoord(roi);
##
# Set the Geometry of the roi from the geometry in ShapeData.
# @param roi See above.
#
def setROIGeometry(self, roi):
abstract();
##
# Set the Settings of the ShapeDate form the settings object.
# @param settings See above.
#
def setShapeSettings(self, settings):
self.shapeSettings = settings;
##
# Set the Settings of the roi from the setting in ShapeData.
# @param roi See above.
#
def setROIShapeSettings(self, roi):
self.shapeSettings.setROIShapeSettings(roi);
##
# Accept visitor.
# @param visitor See above.
#
def acceptVisitor(self, visitor):
abstract();
##
# Create the base type of ROI for this shape.
#
def createBaseType(self):
abstract();
##
# Get the roi from the ShapeData. If the roi already exists return it.
# Otherwise create it from the ShapeData and return it.
# @return See above.
#
def getROI(self):
        if self.roi is not None:
return self.roi;
self.roi = self.createBaseType();
self.setROICoord(self.roi);
self.setROIGeometry(self.roi);
self.setROIShapeSettings(self.roi);
return self.roi;
##
# Set the shape settings object from the roi.
# @param roi see above.
#
def getShapeSettingsFromROI(self, roi):
self.shapeSettings.getShapeSettingsFromROI(roi);
##
# Set the ROICoordinate from the roi.
# @param roi See above.
#
def getCoordFromROI(self, roi):
self.coord.setCoordFromROI(roi);
##
    # Set the geometry from the roi.
# @param roi See above.
#
def getGeometryFromROI(self , roi):
abstract();
##
    # Get all settings from the roi: geometry, shape settings, ROICoordinate.
# @param roi See above.
#
def fromROI(self, roi):
self.roi = roi;
self.getShapeSettingsFromROI(roi);
self.getCoordFromROI(roi);
self.getGeometryFromROI(roi);
##
# The EllipseData class contains all the manipulation and creation of EllipseI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing ellipses.
#
class EllipseData(ShapeData, ROIDrawingI):
##
# Constructor for EllipseData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param cx The centre x coordinate of the ellipse.
# @param cy The centre y coordinate of the ellipse.
# @param rx The major axis of the ellipse.
# @param ry The minor axis of the ellipse.
def __init__(self, roicoord = ROICoordinate(), cx = 0, cy = 0, rx = 0, ry = 0):
ShapeData.__init__(self);
self.cx = rdouble(cx);
self.cy = rdouble(cy);
self.rx = rdouble(rx);
self.ry = rdouble(ry);
self.setCoord(roicoord);
##
# overridden, @See ShapeData#setROIGeometry
#
def setROIGeometry(self, ellipse):
ellipse.setTheZ(self.coord.theZ);
        ellipse.setTheT(self.coord.theT);
ellipse.setCx(self.cx);
ellipse.setCy(self.cy);
ellipse.setRx(self.rx);
ellipse.setRy(self.ry);
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.cx = roi.getCx();
self.cy = roi.getCy();
self.rx = roi.getRx();
self.ry = roi.getRy();
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return EllipseI();
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawEllipse(self.cx.getValue(), self.cy.getValue(), self.rx.getValue(), self.ry.getValue(), self.shapeSettings.getSettings());
##
# The RectangleData class contains all the manipulation and creation of RectI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing rectangles.
#
class RectangleData(ShapeData, ROIDrawingI):
##
# Constructor for RectangleData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param x The top left x - coordinate of the shape.
# @param y The top left y - coordinate of the shape.
# @param width The width of the shape.
# @param height The height of the shape.
def __init__(self, roicoord = ROICoordinate(), x = 0, y = 0, width = 0, height = 0):
ShapeData.__init__(self);
self.x = rdouble(x);
self.y = rdouble(y);
self.width = rdouble(width);
self.height = rdouble(height);
self.setCoord(roicoord);
##
    # overridden, @See ShapeData#setROIGeometry
    #
    def setROIGeometry(self, rectangle):
rectangle.setTheZ(self.coord.theZ);
        rectangle.setTheT(self.coord.theT);
rectangle.setX(self.x);
rectangle.setY(self.y);
rectangle.setWidth(self.width);
rectangle.setHeight(self.height);
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x = roi.getX();
self.y = roi.getY();
self.width = roi.getWidth();
self.height = roi.getHeight();
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return RectI();
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
        visitor.drawRectangle(self.x.getValue(), self.y.getValue(), self.width.getValue(), self.height.getValue(), self.shapeSettings.getSettings());
##
# The LineData class contains all the manipulation and creation of LineI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing lines.
#
class LineData(ShapeData, ROIDrawingI):
##
# Constructor for LineData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param x1 The first x coordinate of the shape.
# @param y1 The first y coordinate of the shape.
# @param x2 The second x coordinate of the shape.
# @param y2 The second y coordinate of the shape.
def __init__(self, roicoord = ROICoordinate(), x1 = 0, y1 = 0, x2 = 0, y2 = 0):
ShapeData.__init__(self);
self.x1 = rdouble(x1);
self.y1 = rdouble(y1);
self.x2 = rdouble(x2);
self.y2 = rdouble(y2);
self.setCoord(roicoord);
##
    # overridden, @See ShapeData#setROIGeometry
    #
    def setROIGeometry(self, line):
line.setTheZ(self.coord.theZ);
        line.setTheT(self.coord.theT);
line.setX1(self.x1);
line.setY1(self.y1);
line.setX2(self.x2);
line.setY2(self.y2);
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x1 = roi.getX1();
self.y1 = roi.getY1();
self.x2 = roi.getX2();
self.y2 = roi.getY2();
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return LineI();
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawLine(self.x1.getValue(), self.y1.getValue(), self.x2.getValue(), self.y2.getValue(), self.shapeSettings.getSettings());
##
# The MaskData class contains all the manipulation and creation of MaskI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing masks.
#
class MaskData(ShapeData, ROIDrawingI):
##
# Constructor for MaskData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param bytes The mask data.
# @param x The top left x - coordinate of the shape.
# @param y The top left y - coordinate of the shape.
# @param width The width of the shape.
# @param height The height of the shape.
def __init__(self, roicoord = ROICoordinate(), bytes = None, x = 0, y = 0, width = 0, height = 0):
ShapeData.__init__(self);
self.x = rdouble(x);
self.y = rdouble(y);
self.width = rdouble(width);
self.height = rdouble(height);
self.bytesdata = bytes;
self.setCoord(roicoord);
##
    # overridden, @See ShapeData#setROIGeometry
    #
    def setROIGeometry(self, mask):
mask.setTheZ(self.coord.theZ);
        mask.setTheT(self.coord.theT);
mask.setX(self.x);
mask.setY(self.y);
mask.setWidth(self.width);
mask.setHeight(self.height);
        mask.setBytes(self.bytesdata);
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x = roi.getX();
self.y = roi.getY();
self.width = roi.getWidth();
self.height = roi.getHeight();
self.bytesdata = roi.getBytes();
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return MaskI();
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawMask(self.x.getValue(), self.y.getValue(), self.width.getValue(), self.height.getValue(), self.bytesdata, self.shapeSettings.getSettings());
##
# The PointData class contains all the manipulation and creation of PointI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing points.
#
class PointData(ShapeData, ROIDrawingI):
##
# Constructor for PointData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
# @param x The x coordinate of the shape.
# @param y The y coordinate of the shape.
def __init__(self, roicoord = ROICoordinate(), x = 0, y = 0):
ShapeData.__init__(self);
self.x = rdouble(x);
self.y = rdouble(y);
self.setCoord(roicoord);
##
    # overridden, @See ShapeData#setROIGeometry
    #
    def setROIGeometry(self, point):
point.setTheZ(self.coord.theZ);
        point.setTheT(self.coord.theT);
point.setX(self.x);
point.setY(self.y);
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.x = roi.getX();
self.y = roi.getY();
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return PointI();
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawEllipse(self.x.getValue(), self.y.getValue(), 3, 3, self.shapeSettings.getSettings());
##
# The PolygonData class contains all the manipulation and creation of PolygonI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing polygons.
#
class PolygonData(ShapeData, ROIDrawingI):
##
# Constructor for PolygonData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
    # @param pointsList The list of points that make up the polygon, as pairs [x1, y1, x2, y2 ..].
def __init__(self, roicoord = ROICoordinate(), pointsList = (0,0)):
ShapeData.__init__(self);
self.points = rstring(self.listToString(pointsList));
self.setCoord(roicoord);
##
    # overridden, @See ShapeData#setROIGeometry
    #
    def setROIGeometry(self, polygon):
polygon.setTheZ(self.coord.theZ);
        polygon.setTheT(self.coord.theT);
polygon.setPoints(self.points);
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.points = roi.getPoints();
##
# Convert a pointsList[x1,y1,x2,y2..] to a string.
# @param pointsList The list of points to convert.
# @return The pointsList converted to a string.
def listToString(self, pointsList):
string = '';
cnt = 0;
for element in pointsList:
if(cnt!=0):
string = string + ',';
cnt += 1;
string = string + str(element);
return string;
##
# Convert a string of points to a tuple list [(x1,y1),(x2,y2)..].
# @param pointString The string to convert.
# @return The tuple list converted from a string.
def stringToTupleList(self, pointString):
elements = [];
list = pointString.split(',');
numTokens = len(list);
        for tokenPair in range(0, numTokens // 2):
elements.append((int(list[tokenPair*2]), int(list[tokenPair*2+1])));
return elements;
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return PolygonI();
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawPolygon(self.stringToTupleList(self.points.getValue()), self.shapeSettings.getSettings());
##
# The PolylineData class contains all the manipulation and creation of PolylineI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing polylines.
#
class PolylineData(ShapeData, ROIDrawingI):
##
# Constructor for PolylineData object.
# @param roicoord The ROICoordinate of the object (default: 0,0)
    # @param pointsList The list of points that make up the polyline, as pairs [x1, y1, x2, y2 ..].
def __init__(self, roicoord = ROICoordinate(), pointsList = (0,0)):
ShapeData.__init__(self);
self.points = rstring(self.listToString(pointsList));
self.setCoord(roicoord);
##
    # overridden, @See ShapeData#setROIGeometry
    #
    def setROIGeometry(self, point):
point.setTheZ(self.coord.theZ);
        point.setTheT(self.coord.theT);
point.setPoints(self.points);
##
# overridden, @See ShapeData#getGeometryFromROI
#
def getGeometryFromROI(self, roi):
self.points = roi.getPoints();
##
# Convert a pointsList[x1,y1,x2,y2..] to a string.
# @param pointsList The list of points to convert.
# @return The pointsList converted to a string.
def listToString(self, pointsList):
string = '';
cnt = 0;
for element in pointsList:
if(cnt > 0):
string = string + ',';
string = string + str(element);
cnt+=1;
return string;
##
# Convert a string of points to a tuple list [(x1,y1),(x2,y2)..].
# @param pointString The string to convert.
# @return The tuple list converted from a string.
def stringToTupleList(self, pointString):
elements = [];
list = pointString.split(',');
numTokens = len(list);
        for tokenPair in range(0, numTokens // 2):
elements.append((int(list[tokenPair*2]), int(list[tokenPair*2+1])));
return elements;
##
# overridden, @See ShapeData#createBaseType
#
def createBaseType(self):
return PolylineI();
##
# overridden, @See ShapeData#acceptVisitor
#
def acceptVisitor(self, visitor):
visitor.drawPolyline(self.stringToTupleList(self.points.getValue()), self.shapeSettings.getSettings());
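##
# Hedged demo (not part of the original utilities): round-tripping geometry
# through the classes above.  Defined only; constructing the server-side
# types assumes a working omero installation.
#
def _roi_utils_demo():
    poly = PolygonData(ROICoordinate(0, 0), [10, 20, 30, 40])
    assert poly.stringToTupleList('10,20,30,40') == [(10, 20), (30, 40)]
    ellipse = EllipseData(ROICoordinate(1, 2), cx=50, cy=60, rx=10, ry=5)
    shape = ellipse.getROI()     # builds an EllipseI with coord and settings
    copy = EllipseData()
    copy.fromROI(shape)          # reads coord, settings and geometry back
    return (copy.cx.getValue(), copy.cy.getValue())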
|
allenlavoie/tensorflow
|
refs/heads/master
|
tensorflow/contrib/image/python/kernel_tests/distort_image_ops_test.py
|
31
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.image.python.ops import distort_image_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(huangyp): also measure the differences between AdjustHsvInYiq and
# AdjustHsv in core.
class AdjustHueInYiqTest(test_util.TensorFlowTestCase):
def _adjust_hue_in_yiq_np(self, x_np, delta_h):
"""Rotate hue in YIQ space.
    Mathematically we first convert rgb color to yiq space, rotate the hue
    by delta_h radians, and then convert back to rgb.
Args:
x_np: input x with last dimension = 3.
delta_h: degree of hue rotation, in radians.
Returns:
Adjusted y with the same shape as x_np.
"""
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
u = np.cos(delta_h)
w = np.sin(delta_h)
# Projection matrix from RGB to YIQ. Numbers from wikipedia
# https://en.wikipedia.org/wiki/YIQ
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.322],
[0.211, -0.523, 0.312]])
y_v = np.dot(x_v, tyiq.T)
# Hue rotation matrix in YIQ space.
hue_rotation = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
y_v = np.dot(y_v, hue_rotation.T)
# Projecting back to RGB space.
y_v = np.dot(y_v, np.linalg.inv(tyiq).T)
return y_v.reshape(x_np.shape)
def _adjust_hue_in_yiq_tf(self, x_np, delta_h):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, delta_h, 1, 1)
y_tf = y.eval()
return y_tf
def test_adjust_random_hue_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = (np.random.rand() * 2.0 - 1.0) * np.pi
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_np = self._adjust_hue_in_yiq_np(x_np, delta_h)
y_tf = self._adjust_hue_in_yiq_tf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_hue_in_yiq_tf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_hue_in_yiq_tf(x_np, delta_h)
class AdjustValueInYiqTest(test_util.TensorFlowTestCase):
def _adjust_value_in_yiq_np(self, x_np, scale):
return x_np * scale
def _adjust_value_in_yiq_tf(self, x_np, scale):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, 0, 1, scale)
y_tf = y.eval()
return y_tf
def test_adjust_random_value_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand() * 2.0 - 1.0
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_np = self._adjust_value_in_yiq_np(x_np, scale)
y_tf = self._adjust_value_in_yiq_tf(x_np, scale)
self.assertAllClose(y_tf, y_np, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_value_in_yiq_tf(x_np, scale)
x_np = np.random.rand(4, 2, 4) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_value_in_yiq_tf(x_np, scale)
class AdjustSaturationInYiqTest(test_util.TensorFlowTestCase):
def _adjust_saturation_in_yiq_tf(self, x_np, scale):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = distort_image_ops.adjust_hsv_in_yiq(x, 0, scale, 1)
y_tf = y.eval()
return y_tf
def _adjust_saturation_in_yiq_np(self, x_np, scale):
"""Adjust saturation using linear interpolation."""
rgb_weights = np.array([0.299, 0.587, 0.114])
gray = np.sum(x_np * rgb_weights, axis=-1, keepdims=True)
y_v = x_np * scale + gray * (1 - scale)
return y_v
def test_adjust_random_saturation_in_yiq(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
'all_random',
'rg_same',
'rb_same',
'gb_same',
'rgb_same',
]
with self.test_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand() * 2.0 - 1.0
if test_style == 'all_random':
pass
elif test_style == 'rg_same':
x_np[..., 1] = x_np[..., 0]
elif test_style == 'rb_same':
x_np[..., 2] = x_np[..., 0]
elif test_style == 'gb_same':
x_np[..., 2] = x_np[..., 1]
elif test_style == 'rgb_same':
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError('Invalid test style: %s' % (test_style))
y_baseline = self._adjust_saturation_in_yiq_np(x_np, scale)
y_tf = self._adjust_saturation_in_yiq_tf(x_np, scale)
self.assertAllClose(y_tf, y_baseline, rtol=2e-4, atol=1e-4)
def test_invalid_shapes(self):
x_np = np.random.rand(2, 3) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, 'Shape must be at least rank 3'):
self._adjust_saturation_in_yiq_tf(x_np, scale)
x_np = np.random.rand(4, 2, 4) * 255.
scale = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError('input must have 3 channels but instead has '
'4 channels'):
self._adjust_saturation_in_yiq_tf(x_np, scale)
class AdjustHueInYiqBenchmark(test.Benchmark):
def _benchmark_adjust_hue_in_yiq(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session('', graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = distort_image_ops.adjust_hsv_in_yiq(inputs, delta, 1, 1)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + '_%s' % (cpu_count if cpu_count is not None else 'all')
print('benchmarkadjust_hue_in_yiq_299_299_3_%s step_time: %.2f us' %
(tag, step_time * 1e6))
self.report_benchmark(
name='benchmarkadjust_hue_in_yiq_299_299_3_%s' % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmark_adjust_hue_in_yiqCpu1(self):
self._benchmark_adjust_hue_in_yiq('/cpu:0', 1)
def benchmark_adjust_hue_in_yiqCpuAll(self):
self._benchmark_adjust_hue_in_yiq('/cpu:0', None)
def benchmark_adjust_hue_in_yiq_gpu_all(self):
self._benchmark_adjust_hue_in_yiq(test.gpu_device_name(), None)
class AdjustSaturationInYiqBenchmark(test.Benchmark):
def _benchmark_adjust_saturation_in_yiq(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session('', graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
scale = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = distort_image_ops.adjust_hsv_in_yiq(inputs, 0, scale, 1)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
sess.run(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = '%s' % (cpu_count) if cpu_count is not None else '_all'
print('benchmarkAdjustSaturationInYiq_299_299_3_cpu%s step_time: %.2f us' %
(tag, step_time * 1e6))
self.report_benchmark(
name='benchmarkAdjustSaturationInYiq_299_299_3_cpu%s' % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmark_adjust_saturation_in_yiq_cpu1(self):
self._benchmark_adjust_saturation_in_yiq('/cpu:0', 1)
def benchmark_adjust_saturation_in_yiq_cpu_all(self):
self._benchmark_adjust_saturation_in_yiq('/cpu:0', None)
def benchmark_adjust_saturation_in_yiq_gpu_all(self):
self._benchmark_adjust_saturation_in_yiq(test.gpu_device_name(), None)
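# Hedged reference sketch (not part of the test suite): the YIQ hue rotation
# that _adjust_hue_in_yiq_np implements, written out once as a standalone
# NumPy helper for readers.
def _yiq_hue_rotation_reference(rgb, delta_h):
  """Rotates the hue of an (..., 3) RGB array by delta_h radians via YIQ."""
  tyiq = np.array([[0.299, 0.587, 0.114],
                   [0.596, -0.274, -0.322],
                   [0.211, -0.523, 0.312]])
  u, w = np.cos(delta_h), np.sin(delta_h)
  rotation = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
  flat = rgb.reshape([-1, 3])
  out = flat.dot(tyiq.T).dot(rotation.T).dot(np.linalg.inv(tyiq).T)
  return out.reshape(rgb.shape)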
if __name__ == '__main__':
googletest.main()
|
OpenPymeMx/OCB
|
refs/heads/7.0
|
addons/sale_margin/__init__.py
|
441
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_margin
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
NathanW2/QGIS
|
refs/heads/master
|
tests/src/python/test_qgspostgresdomain.py
|
17
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for Postgres domains.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Denis Rouzaud'
__date__ = '10/02/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.core import QgsVectorLayer, QgsProject
from qgis.testing import start_app, unittest
start_app()
class TestQgsPostgresDomain(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
        Set up the involved layer.
"""
cls.dbconn = 'service=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_PGTEST_DB']
# Create test layer
cls.vl = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."colors" sql=', 'colors', 'postgres')
QgsProject.instance().addMapLayer(cls.vl)
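    # The qgis_test "colors" table presumably defines its columns against
    # PostgreSQL domains with CHECK (VALUE IN (...)) constraints; the
    # provider's enumValues() surfaces the allowed values per field index,
    # as asserted below.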
def test_postgres_domain(self):
self.assertEqual(self.vl.dataProvider().enumValues(1), ['red', 'green', 'blue'])
self.assertEqual(self.vl.dataProvider().enumValues(2), ['yellow', 'cyan', 'magenta'])
self.assertEqual(self.vl.dataProvider().enumValues(3), ['Alchemilla', 'Alstroemeria', 'Alyssum'])
if __name__ == '__main__':
unittest.main()
|
sosguns2002/interactive-mining
|
refs/heads/master
|
interactive-mining-3rdparty-madis/madis/src/lib/chardet/langgreekmodel.py
|
235
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = ( \
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = { \
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': constants.False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = { \
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': constants.False,
'charsetName': "windows-1253"
}
|
jepi776/study
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py
|
1869
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies such large structures that even a small per-copy
overhead ends up taking seconds in a project the size of Chromium."""
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
    raise Error(('Unsupported type %s for deepcopy. Use copy.deepcopy '
                 'or expand simple_copy support.') % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.iteritems():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
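# A small, hedged usage sketch (not part of the original module): deepcopy()
# recursively copies dicts and lists and returns atomic values unchanged.
if __name__ == '__main__':
  original = {'flags': ['-Wall', '-O2'], 'nested': {'n': 1}}
  copied = deepcopy(original)
  assert copied == original and copied is not original
  assert copied['flags'] is not original['flags']
  assert copied['nested'] is not original['nested']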
|
montyly/manticore
|
refs/heads/master
|
scripts/extract_syscalls.py
|
2
|
#!/usr/bin/env python3
"""
Generate syscall tables from the Linux source. Used to generate
manticore/platforms/linux_syscalls.py.
This fetches the tables from kernel.org.
Usage:
./extract_syscalls.py [--linux_version linux_version] linux_syscalls.py
"""
import argparse
import os
import re
import subprocess
import sys
import tempfile
from urllib.request import urlopen
BASE_URL = (
"https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/{}?id=refs/tags/v{}"
)
# Use an associative list rather than a dict to get deterministic output.
ARCH_TABLES = [
("i386", "arch/x86/entry/syscalls/syscall_32.tbl"),
("amd64", "arch/x86/entry/syscalls/syscall_64.tbl"),
("armv7", "arch/arm/tools/syscall.tbl"),
]
BITSPERLONG_HDR = "arch/{}/include/uapi/asm/bitsperlong.h"
ARCH_UNISTD_HDR = "arch/{}/include/uapi/asm/unistd.h"
UNISTD_HDR = "include/uapi/asm-generic/unistd.h"
# Format: Manticore arch, Linux arch.
# XXX: Code that uses this might need to be tweaked for other architectures to
# work properly.
UNISTD = [("aarch64", "arm64")]
__ARM_NR_BASE = 0xF0000
ADDITIONAL_SYSCALLS = {
"armv7": [
("sys_ARM_NR_breakpoint", __ARM_NR_BASE + 1),
("sys_ARM_NR_cacheflush", __ARM_NR_BASE + 2),
("sys_ARM_NR_usr26", __ARM_NR_BASE + 3),
("sys_ARM_NR_usr32", __ARM_NR_BASE + 4),
("sys_ARM_NR_set_tls", __ARM_NR_BASE + 5),
]
}
def open_url(url):
res = urlopen(url)
if res.code // 100 != 2:
sys.stderr.write("Failed retrieving file; check version and connection.\n")
sys.stderr.write(f"Url: {url}\n")
sys.exit(1)
return res
def write_without_includes(f, res):
for line in res.readlines():
line = line.decode()
line = line.strip()
if not line.startswith("#include"):
f.write(line + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate syscall tables")
parser.add_argument("output", help="Python output to generate tables")
parser.add_argument(
"--linux_version", help="Major version of the Linux kernel to use", default="4.11"
)
args = parser.parse_args()
output = open(args.output, "w+")
output.write("#\n#\n# AUTOGENERATED, DO NOT EDIT\n#\n")
output.write(f"# From version: {args.linux_version}\n#\n\n")
for arch, path in ARCH_TABLES:
url = BASE_URL.format(path, args.linux_version)
res = open_url(url)
output.write(f"{arch} = {{\n")
for line in res.readlines():
line = line.decode()
line = line.strip()
if line.startswith("#"):
continue
columns = line.split()
if len(columns) < 4:
continue
num, abi, name, entry = columns[:4]
output.write(f' {num}: "{entry}",\n')
        for entry, num in ADDITIONAL_SYSCALLS.get(arch, []):
output.write(f' {num}: "{entry}",\n')
output.write("}\n")
for march, larch in UNISTD:
bitsperlong_hdr = BITSPERLONG_HDR.format(larch)
arch_unistd_hdr = ARCH_UNISTD_HDR.format(larch)
bitsperlong_url = BASE_URL.format(bitsperlong_hdr, args.linux_version)
arch_unistd_url = BASE_URL.format(arch_unistd_hdr, args.linux_version)
unistd_url = BASE_URL.format(UNISTD_HDR, args.linux_version)
bitsperlong_res = open_url(bitsperlong_url)
arch_unistd_res = open_url(arch_unistd_url)
unistd_res = open_url(unistd_url)
        syscall_rx = r"SYSCALL: (\d+) ([a-z_0-9]+)"
syscall_define = "#define __SYSCALL(nr, sym) SYSCALL: nr sym"
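        # A brief note on the mechanism (not in the original source): the
        # asm-generic unistd.h declares each syscall as __SYSCALL(nr, sym).
        # Redefining __SYSCALL to expand to the literal text "SYSCALL: nr sym"
        # and running the C preprocessor over the concatenated headers turns
        # every declaration into a "SYSCALL: <number> <entry>" line, which
        # syscall_rx then matches below.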
output.write(f"{march} = {{\n")
fd, tmp_path = tempfile.mkstemp()
try:
with os.fdopen(fd, "w") as tmp:
# The order is important here for CPP to work correctly.
tmp.write(syscall_define + "\n")
write_without_includes(tmp, bitsperlong_res)
write_without_includes(tmp, arch_unistd_res)
write_without_includes(tmp, unistd_res)
process = subprocess.Popen(
["cpp", "-E", tmp_path], stdout=subprocess.PIPE, encoding="utf-8"
)
out, _ = process.communicate()
lines = out.split("\n")
for line in lines:
m = re.search(syscall_rx, line)
if m:
num = m.group(1)
entry = m.group(2)
if entry != "sys_ni_syscall": # not implemented syscall
output.write(f' {num}: "{entry}",\n')
finally:
os.remove(tmp_path)
    for entry, num in ADDITIONAL_SYSCALLS.get(march, []):
output.write(f' {num}: "{entry}",\n')
output.write("}\n")
|
encukou/cython
|
refs/heads/master
|
Demos/benchmarks/richards.py
|
23
|
# based on a Java version:
# Based on original version written in BCPL by Dr Martin Richards
# in 1981 at Cambridge University Computer Laboratory, England
# and a C++ version derived from a Smalltalk version written by
# L Peter Deutsch.
# Java version: Copyright (C) 1995 Sun Microsystems, Inc.
# Translation from C++, Mario Wolczko
# Outer loop added by Alex Jacoby
from __future__ import print_function  # print(..., end='') below requires this on Python 2
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
BUFSIZE_RANGE = range(BUFSIZE)
class Packet(object):
def __init__(self,l,i,k):
self.link = l
self.ident = i
self.kind = k
self.datum = 0
self.data = [0] * BUFSIZE
def append_to(self,lst):
self.link = None
if lst is None:
return self
else:
p = lst
next = p.link
while next is not None:
p = next
next = p.link
p.link = self
return lst
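# append_to() implements an intrusive singly-linked list: each Packet carries
# its own 'link' field, so appending just walks to the tail. Illustrative
# sketch with hypothetical values (not executed by the benchmark):
#   head = Packet(None, 0, K_WORK)
#   head = Packet(None, 1, K_WORK).append_to(head)  # returns the same head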
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
self.pending = None
class IdleTaskRec(TaskRec):
def __init__(self):
self.control = 1
self.count = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
self.work_in = None
self.device_in = None
def workInAdd(self,p):
self.work_in = p.append_to(self.work_in)
return self.work_in
def deviceInAdd(self,p):
self.device_in = p.append_to(self.device_in)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
self.destination = I_HANDLERA
self.count = 0
# Task
class TaskState(object):
def __init__(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
def packetPending(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
return self
def waiting(self):
self.packet_pending = False
self.task_waiting = True
self.task_holding = False
return self
def running(self):
self.packet_pending = False
self.task_waiting = False
self.task_holding = False
return self
def waitingWithPacket(self):
self.packet_pending = True
self.task_waiting = True
self.task_holding = False
return self
def isPacketPending(self):
return self.packet_pending
def isTaskWaiting(self):
return self.task_waiting
def isTaskHolding(self):
return self.task_holding
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isWaitingWithPacket(self):
return self.packet_pending and self.task_waiting and not self.task_holding
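# The three flags above encode the scheduler states used below:
#   running:           no packet pending, not waiting, not holding
#   waiting:           task_waiting only
#   waitingWithPacket: packet_pending and task_waiting
#   packetPending:     packet_pending only
# (task_holding is toggled separately by Task.hold() and Task.release().)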
tracing = False
layout = 0
def trace(a):
global layout
layout -= 1
if layout <= 0:
print()
layout = 50
print(a, end='')
TASKTABSIZE = 10
class TaskWorkArea(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
taskWorkArea = TaskWorkArea()
class Task(TaskState):
def __init__(self,i,p,w,initialState,r):
self.link = taskWorkArea.taskList
self.ident = i
self.priority = p
self.input = w
self.packet_pending = initialState.isPacketPending()
self.task_waiting = initialState.isTaskWaiting()
self.task_holding = initialState.isTaskHolding()
self.handle = r
taskWorkArea.taskList = self
taskWorkArea.taskTab[i] = self
def fn(self,pkt,r):
raise NotImplementedError
def addPacket(self,p,old):
if self.input is None:
self.input = p
self.packet_pending = True
if self.priority > old.priority:
return self
else:
p.append_to(self.input)
return old
def runTask(self):
if self.isWaitingWithPacket():
msg = self.input
self.input = msg.link
if self.input is None:
self.running()
else:
self.packetPending()
else:
msg = None
return self.fn(msg,self.handle)
def waitTask(self):
self.task_waiting = True
return self
def hold(self):
taskWorkArea.holdCount += 1
self.task_holding = True
return self.link
def release(self,i):
t = self.findtcb(i)
t.task_holding = False
if t.priority > self.priority:
return t
else:
return self
def qpkt(self,pkt):
t = self.findtcb(pkt.ident)
taskWorkArea.qpktCount += 1
pkt.link = None
pkt.ident = self.ident
return t.addPacket(pkt,self)
def findtcb(self,id):
t = taskWorkArea.taskTab[id]
if t is None:
raise Exception("Bad task id %d" % id)
return t
# DeviceTask
class DeviceTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
d = r
assert isinstance(d, DeviceTaskRec)
if pkt is None:
pkt = d.pending
if pkt is None:
return self.waitTask()
else:
d.pending = None
return self.qpkt(pkt)
else:
d.pending = pkt
if tracing: trace(pkt.datum)
return self.hold()
class HandlerTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
h = r
assert isinstance(h, HandlerTaskRec)
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.deviceInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work.datum
if count >= BUFSIZE:
h.work_in = work.link
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev.link
dev.datum = work.data[count]
work.datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,0,None,s,r)
def fn(self,pkt,r):
i = r
assert isinstance(i, IdleTaskRec)
i.count -= 1
if i.count == 0:
return self.hold()
elif i.control & 1 == 0:
i.control //= 2
return self.release(I_DEVA)
else:
i.control = i.control//2 ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
w = r
assert isinstance(w, WorkerTaskRec)
if pkt is None:
return self.waitTask()
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in BUFSIZE_RANGE: # range(BUFSIZE)
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
import time
def schedule():
t = taskWorkArea.taskList
while t is not None:
pkt = None
if tracing:
print("tcb =", t.ident)
if t.isTaskHoldingOrWaiting():
t = t.link
else:
if tracing: trace(chr(ord("0")+t.ident))
t = t.runTask()
class Richards(object):
def run(self, iterations):
for i in range(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq , 0, K_WORK)
WorkTask(I_WORK, 1000, wkq, TaskState().waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
HandlerTask(I_HANDLERA, 2000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
HandlerTask(I_HANDLERB, 3000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
            wkq = None
            DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec())
            DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec())
schedule()
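            # Known-good counter totals for one pass of the canonical Richards
            # workload; any other values mean the scheduling went wrong.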
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
def entry_point(iterations):
r = Richards()
startTime = time.time()
result = r.run(iterations)
endTime = time.time()
return result, startTime, endTime
def main(iterations = 10, entry_point = entry_point):
print("Richards benchmark (Python) starting... [%r]" % entry_point)
result, startTime, endTime = entry_point(iterations)
if not result:
print("Incorrect results!")
return -1
print("finished.")
total_s = endTime - startTime
print("Total time for %d iterations: %.2f secs" % (iterations, total_s))
print("Average time per iteration: %.2f ms" % (total_s*1000/iterations))
return 42
try:
import sys
if '-nojit' in sys.argv:
sys.argv.remove('-nojit')
raise ImportError
import pypyjit
except ImportError:
pass
else:
import types
for item in globals().values():
if isinstance(item, types.FunctionType):
pypyjit.enable(item.func_code)
elif isinstance(item, type):
for it in item.__dict__.values():
if isinstance(it, types.FunctionType):
pypyjit.enable(it.func_code)
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2:
main(iterations = int(sys.argv[1]))
else:
main()
|
ltilve/ChromiumGStreamerBackend
|
refs/heads/master
|
tools/symsrc/pefile.py
|
187
|
# -*- coding: Latin-1 -*-
"""pefile, Portable Executable reader module
All the PE file basic structures are available with their default names
as attributes of the instance returned.
Processed elements such as the import table are made available with lowercase
names, to differentiate them from the upper case basic structure names.
pefile has been tested against the limits of valid PE headers, that is, malware.
Lots of packed malware attempt to abuse the format way beyond its standard use.
To the best of my knowledge most of the abuses are handled gracefully.
Copyright (c) 2005, 2006, 2007, 2008 Ero Carrera <ero@dkbza.org>
All rights reserved.
For detailed copyright information see the file COPYING in
the root of the distribution archive.
"""
__author__ = 'Ero Carrera'
__version__ = '1.2.9.1'
__contact__ = 'ero@dkbza.org'
import os
import struct
import time
import math
import re
import exceptions
import string
import array
sha1, sha256, sha512, md5 = None, None, None, None
try:
import hashlib
sha1 = hashlib.sha1
sha256 = hashlib.sha256
sha512 = hashlib.sha512
md5 = hashlib.md5
except ImportError:
try:
import sha
sha1 = sha.new
except ImportError:
pass
try:
import md5
md5 = md5.new
except ImportError:
pass
fast_load = False
IMAGE_DOS_SIGNATURE = 0x5A4D
IMAGE_OS2_SIGNATURE = 0x454E
IMAGE_OS2_SIGNATURE_LE = 0x454C
IMAGE_VXD_SIGNATURE = 0x454C
IMAGE_NT_SIGNATURE = 0x00004550
IMAGE_NUMBEROF_DIRECTORY_ENTRIES= 16
IMAGE_ORDINAL_FLAG = 0x80000000L
IMAGE_ORDINAL_FLAG64 = 0x8000000000000000L
OPTIONAL_HEADER_MAGIC_PE = 0x10b
OPTIONAL_HEADER_MAGIC_PE_PLUS = 0x20b
directory_entry_types = [
('IMAGE_DIRECTORY_ENTRY_EXPORT', 0),
('IMAGE_DIRECTORY_ENTRY_IMPORT', 1),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', 2),
('IMAGE_DIRECTORY_ENTRY_EXCEPTION', 3),
('IMAGE_DIRECTORY_ENTRY_SECURITY', 4),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', 5),
('IMAGE_DIRECTORY_ENTRY_DEBUG', 6),
('IMAGE_DIRECTORY_ENTRY_COPYRIGHT', 7),
('IMAGE_DIRECTORY_ENTRY_GLOBALPTR', 8),
('IMAGE_DIRECTORY_ENTRY_TLS', 9),
('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', 10),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', 11),
('IMAGE_DIRECTORY_ENTRY_IAT', 12),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', 13),
('IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR',14),
('IMAGE_DIRECTORY_ENTRY_RESERVED', 15) ]
DIRECTORY_ENTRY = dict([(e[1], e[0]) for e in directory_entry_types]+directory_entry_types)
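# A note on the pattern used throughout this module: merging the reversed
# (value, name) pairs with the original (name, value) pairs produces a
# two-way mapping, e.g. DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] == 0
# and DIRECTORY_ENTRY[0] == 'IMAGE_DIRECTORY_ENTRY_EXPORT'.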
image_characteristics = [
('IMAGE_FILE_RELOCS_STRIPPED', 0x0001),
('IMAGE_FILE_EXECUTABLE_IMAGE', 0x0002),
('IMAGE_FILE_LINE_NUMS_STRIPPED', 0x0004),
('IMAGE_FILE_LOCAL_SYMS_STRIPPED', 0x0008),
('IMAGE_FILE_AGGRESIVE_WS_TRIM', 0x0010),
('IMAGE_FILE_LARGE_ADDRESS_AWARE', 0x0020),
('IMAGE_FILE_16BIT_MACHINE', 0x0040),
('IMAGE_FILE_BYTES_REVERSED_LO', 0x0080),
('IMAGE_FILE_32BIT_MACHINE', 0x0100),
('IMAGE_FILE_DEBUG_STRIPPED', 0x0200),
('IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', 0x0400),
('IMAGE_FILE_NET_RUN_FROM_SWAP', 0x0800),
('IMAGE_FILE_SYSTEM', 0x1000),
('IMAGE_FILE_DLL', 0x2000),
('IMAGE_FILE_UP_SYSTEM_ONLY', 0x4000),
('IMAGE_FILE_BYTES_REVERSED_HI', 0x8000) ]
IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in
image_characteristics]+image_characteristics)
section_characteristics = [
('IMAGE_SCN_CNT_CODE', 0x00000020),
('IMAGE_SCN_CNT_INITIALIZED_DATA', 0x00000040),
('IMAGE_SCN_CNT_UNINITIALIZED_DATA', 0x00000080),
('IMAGE_SCN_LNK_OTHER', 0x00000100),
('IMAGE_SCN_LNK_INFO', 0x00000200),
('IMAGE_SCN_LNK_REMOVE', 0x00000800),
('IMAGE_SCN_LNK_COMDAT', 0x00001000),
('IMAGE_SCN_MEM_FARDATA', 0x00008000),
('IMAGE_SCN_MEM_PURGEABLE', 0x00020000),
('IMAGE_SCN_MEM_16BIT', 0x00020000),
('IMAGE_SCN_MEM_LOCKED', 0x00040000),
('IMAGE_SCN_MEM_PRELOAD', 0x00080000),
('IMAGE_SCN_ALIGN_1BYTES', 0x00100000),
('IMAGE_SCN_ALIGN_2BYTES', 0x00200000),
('IMAGE_SCN_ALIGN_4BYTES', 0x00300000),
('IMAGE_SCN_ALIGN_8BYTES', 0x00400000),
('IMAGE_SCN_ALIGN_16BYTES', 0x00500000),
('IMAGE_SCN_ALIGN_32BYTES', 0x00600000),
('IMAGE_SCN_ALIGN_64BYTES', 0x00700000),
('IMAGE_SCN_ALIGN_128BYTES', 0x00800000),
('IMAGE_SCN_ALIGN_256BYTES', 0x00900000),
('IMAGE_SCN_ALIGN_512BYTES', 0x00A00000),
('IMAGE_SCN_ALIGN_1024BYTES', 0x00B00000),
('IMAGE_SCN_ALIGN_2048BYTES', 0x00C00000),
('IMAGE_SCN_ALIGN_4096BYTES', 0x00D00000),
('IMAGE_SCN_ALIGN_8192BYTES', 0x00E00000),
('IMAGE_SCN_ALIGN_MASK', 0x00F00000),
('IMAGE_SCN_LNK_NRELOC_OVFL', 0x01000000),
('IMAGE_SCN_MEM_DISCARDABLE', 0x02000000),
('IMAGE_SCN_MEM_NOT_CACHED', 0x04000000),
('IMAGE_SCN_MEM_NOT_PAGED', 0x08000000),
('IMAGE_SCN_MEM_SHARED', 0x10000000),
('IMAGE_SCN_MEM_EXECUTE', 0x20000000),
('IMAGE_SCN_MEM_READ', 0x40000000),
('IMAGE_SCN_MEM_WRITE', 0x80000000L) ]
SECTION_CHARACTERISTICS = dict([(e[1], e[0]) for e in
section_characteristics]+section_characteristics)
debug_types = [
('IMAGE_DEBUG_TYPE_UNKNOWN', 0),
('IMAGE_DEBUG_TYPE_COFF', 1),
('IMAGE_DEBUG_TYPE_CODEVIEW', 2),
('IMAGE_DEBUG_TYPE_FPO', 3),
('IMAGE_DEBUG_TYPE_MISC', 4),
('IMAGE_DEBUG_TYPE_EXCEPTION', 5),
('IMAGE_DEBUG_TYPE_FIXUP', 6),
('IMAGE_DEBUG_TYPE_OMAP_TO_SRC', 7),
('IMAGE_DEBUG_TYPE_OMAP_FROM_SRC', 8),
('IMAGE_DEBUG_TYPE_BORLAND', 9),
('IMAGE_DEBUG_TYPE_RESERVED10', 10) ]
DEBUG_TYPE = dict([(e[1], e[0]) for e in debug_types]+debug_types)
subsystem_types = [
('IMAGE_SUBSYSTEM_UNKNOWN', 0),
('IMAGE_SUBSYSTEM_NATIVE', 1),
('IMAGE_SUBSYSTEM_WINDOWS_GUI', 2),
('IMAGE_SUBSYSTEM_WINDOWS_CUI', 3),
('IMAGE_SUBSYSTEM_OS2_CUI', 5),
('IMAGE_SUBSYSTEM_POSIX_CUI', 7),
('IMAGE_SUBSYSTEM_WINDOWS_CE_GUI', 9),
('IMAGE_SUBSYSTEM_EFI_APPLICATION', 10),
('IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER', 11),
('IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER', 12),
('IMAGE_SUBSYSTEM_EFI_ROM', 13),
('IMAGE_SUBSYSTEM_XBOX', 14)]
SUBSYSTEM_TYPE = dict([(e[1], e[0]) for e in subsystem_types]+subsystem_types)
machine_types = [
('IMAGE_FILE_MACHINE_UNKNOWN', 0),
('IMAGE_FILE_MACHINE_AM33', 0x1d3),
('IMAGE_FILE_MACHINE_AMD64', 0x8664),
('IMAGE_FILE_MACHINE_ARM', 0x1c0),
('IMAGE_FILE_MACHINE_EBC', 0xebc),
('IMAGE_FILE_MACHINE_I386', 0x14c),
('IMAGE_FILE_MACHINE_IA64', 0x200),
    ('IMAGE_FILE_MACHINE_M32R',    0x9041),
('IMAGE_FILE_MACHINE_MIPS16', 0x266),
('IMAGE_FILE_MACHINE_MIPSFPU', 0x366),
('IMAGE_FILE_MACHINE_MIPSFPU16',0x466),
('IMAGE_FILE_MACHINE_POWERPC', 0x1f0),
('IMAGE_FILE_MACHINE_POWERPCFP',0x1f1),
('IMAGE_FILE_MACHINE_R4000', 0x166),
('IMAGE_FILE_MACHINE_SH3', 0x1a2),
('IMAGE_FILE_MACHINE_SH3DSP', 0x1a3),
('IMAGE_FILE_MACHINE_SH4', 0x1a6),
('IMAGE_FILE_MACHINE_SH5', 0x1a8),
('IMAGE_FILE_MACHINE_THUMB', 0x1c2),
('IMAGE_FILE_MACHINE_WCEMIPSV2',0x169),
]
MACHINE_TYPE = dict([(e[1], e[0]) for e in machine_types]+machine_types)
relocation_types = [
('IMAGE_REL_BASED_ABSOLUTE', 0),
('IMAGE_REL_BASED_HIGH', 1),
('IMAGE_REL_BASED_LOW', 2),
('IMAGE_REL_BASED_HIGHLOW', 3),
('IMAGE_REL_BASED_HIGHADJ', 4),
('IMAGE_REL_BASED_MIPS_JMPADDR', 5),
('IMAGE_REL_BASED_SECTION', 6),
('IMAGE_REL_BASED_REL', 7),
('IMAGE_REL_BASED_MIPS_JMPADDR16', 9),
('IMAGE_REL_BASED_IA64_IMM64', 9),
('IMAGE_REL_BASED_DIR64', 10),
('IMAGE_REL_BASED_HIGH3ADJ', 11) ]
RELOCATION_TYPE = dict([(e[1], e[0]) for e in relocation_types]+relocation_types)
dll_characteristics = [
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0001', 0x0001),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0002', 0x0002),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0004', 0x0004),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0008', 0x0008),
('IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE', 0x0040),
('IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY', 0x0080),
('IMAGE_DLL_CHARACTERISTICS_NX_COMPAT', 0x0100),
('IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION', 0x0200),
('IMAGE_DLL_CHARACTERISTICS_NO_SEH', 0x0400),
('IMAGE_DLL_CHARACTERISTICS_NO_BIND', 0x0800),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x1000', 0x1000),
('IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER', 0x2000),
('IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE', 0x8000) ]
DLL_CHARACTERISTICS = dict([(e[1], e[0]) for e in dll_characteristics]+dll_characteristics)
# Resource types
resource_type = [
('RT_CURSOR', 1),
('RT_BITMAP', 2),
('RT_ICON', 3),
('RT_MENU', 4),
('RT_DIALOG', 5),
('RT_STRING', 6),
('RT_FONTDIR', 7),
('RT_FONT', 8),
('RT_ACCELERATOR', 9),
('RT_RCDATA', 10),
('RT_MESSAGETABLE', 11),
('RT_GROUP_CURSOR', 12),
('RT_GROUP_ICON', 14),
('RT_VERSION', 16),
('RT_DLGINCLUDE', 17),
('RT_PLUGPLAY', 19),
('RT_VXD', 20),
('RT_ANICURSOR', 21),
('RT_ANIICON', 22),
('RT_HTML', 23),
('RT_MANIFEST', 24) ]
RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type]+resource_type)
# Language definitions
lang = [
('LANG_NEUTRAL', 0x00),
('LANG_INVARIANT', 0x7f),
('LANG_AFRIKAANS', 0x36),
('LANG_ALBANIAN', 0x1c),
('LANG_ARABIC', 0x01),
('LANG_ARMENIAN', 0x2b),
('LANG_ASSAMESE', 0x4d),
('LANG_AZERI', 0x2c),
('LANG_BASQUE', 0x2d),
('LANG_BELARUSIAN', 0x23),
('LANG_BENGALI', 0x45),
('LANG_BULGARIAN', 0x02),
('LANG_CATALAN', 0x03),
('LANG_CHINESE', 0x04),
('LANG_CROATIAN', 0x1a),
('LANG_CZECH', 0x05),
('LANG_DANISH', 0x06),
('LANG_DIVEHI', 0x65),
('LANG_DUTCH', 0x13),
('LANG_ENGLISH', 0x09),
('LANG_ESTONIAN', 0x25),
('LANG_FAEROESE', 0x38),
('LANG_FARSI', 0x29),
('LANG_FINNISH', 0x0b),
('LANG_FRENCH', 0x0c),
('LANG_GALICIAN', 0x56),
('LANG_GEORGIAN', 0x37),
('LANG_GERMAN', 0x07),
('LANG_GREEK', 0x08),
('LANG_GUJARATI', 0x47),
('LANG_HEBREW', 0x0d),
('LANG_HINDI', 0x39),
('LANG_HUNGARIAN', 0x0e),
('LANG_ICELANDIC', 0x0f),
('LANG_INDONESIAN', 0x21),
('LANG_ITALIAN', 0x10),
('LANG_JAPANESE', 0x11),
('LANG_KANNADA', 0x4b),
('LANG_KASHMIRI', 0x60),
('LANG_KAZAK', 0x3f),
('LANG_KONKANI', 0x57),
('LANG_KOREAN', 0x12),
('LANG_KYRGYZ', 0x40),
('LANG_LATVIAN', 0x26),
('LANG_LITHUANIAN', 0x27),
('LANG_MACEDONIAN', 0x2f),
('LANG_MALAY', 0x3e),
('LANG_MALAYALAM', 0x4c),
('LANG_MANIPURI', 0x58),
('LANG_MARATHI', 0x4e),
('LANG_MONGOLIAN', 0x50),
('LANG_NEPALI', 0x61),
('LANG_NORWEGIAN', 0x14),
('LANG_ORIYA', 0x48),
('LANG_POLISH', 0x15),
('LANG_PORTUGUESE', 0x16),
('LANG_PUNJABI', 0x46),
('LANG_ROMANIAN', 0x18),
('LANG_RUSSIAN', 0x19),
('LANG_SANSKRIT', 0x4f),
('LANG_SERBIAN', 0x1a),
('LANG_SINDHI', 0x59),
('LANG_SLOVAK', 0x1b),
('LANG_SLOVENIAN', 0x24),
('LANG_SPANISH', 0x0a),
('LANG_SWAHILI', 0x41),
('LANG_SWEDISH', 0x1d),
('LANG_SYRIAC', 0x5a),
('LANG_TAMIL', 0x49),
('LANG_TATAR', 0x44),
('LANG_TELUGU', 0x4a),
('LANG_THAI', 0x1e),
('LANG_TURKISH', 0x1f),
('LANG_UKRAINIAN', 0x22),
('LANG_URDU', 0x20),
('LANG_UZBEK', 0x43),
('LANG_VIETNAMESE', 0x2a),
('LANG_GAELIC', 0x3c),
('LANG_MALTESE', 0x3a),
('LANG_MAORI', 0x28),
('LANG_RHAETO_ROMANCE',0x17),
('LANG_SAAMI', 0x3b),
('LANG_SORBIAN', 0x2e),
('LANG_SUTU', 0x30),
('LANG_TSONGA', 0x31),
('LANG_TSWANA', 0x32),
('LANG_VENDA', 0x33),
('LANG_XHOSA', 0x34),
('LANG_ZULU', 0x35),
('LANG_ESPERANTO', 0x8f),
('LANG_WALON', 0x90),
('LANG_CORNISH', 0x91),
('LANG_WELSH', 0x92),
('LANG_BRETON', 0x93) ]
LANG = dict(lang+[(e[1], e[0]) for e in lang])
# Sublanguage definitions
sublang = [
('SUBLANG_NEUTRAL', 0x00),
('SUBLANG_DEFAULT', 0x01),
('SUBLANG_SYS_DEFAULT', 0x02),
('SUBLANG_ARABIC_SAUDI_ARABIA', 0x01),
('SUBLANG_ARABIC_IRAQ', 0x02),
('SUBLANG_ARABIC_EGYPT', 0x03),
('SUBLANG_ARABIC_LIBYA', 0x04),
('SUBLANG_ARABIC_ALGERIA', 0x05),
('SUBLANG_ARABIC_MOROCCO', 0x06),
('SUBLANG_ARABIC_TUNISIA', 0x07),
('SUBLANG_ARABIC_OMAN', 0x08),
('SUBLANG_ARABIC_YEMEN', 0x09),
('SUBLANG_ARABIC_SYRIA', 0x0a),
('SUBLANG_ARABIC_JORDAN', 0x0b),
('SUBLANG_ARABIC_LEBANON', 0x0c),
('SUBLANG_ARABIC_KUWAIT', 0x0d),
('SUBLANG_ARABIC_UAE', 0x0e),
('SUBLANG_ARABIC_BAHRAIN', 0x0f),
('SUBLANG_ARABIC_QATAR', 0x10),
('SUBLANG_AZERI_LATIN', 0x01),
('SUBLANG_AZERI_CYRILLIC', 0x02),
('SUBLANG_CHINESE_TRADITIONAL', 0x01),
('SUBLANG_CHINESE_SIMPLIFIED', 0x02),
('SUBLANG_CHINESE_HONGKONG', 0x03),
('SUBLANG_CHINESE_SINGAPORE', 0x04),
('SUBLANG_CHINESE_MACAU', 0x05),
('SUBLANG_DUTCH', 0x01),
('SUBLANG_DUTCH_BELGIAN', 0x02),
('SUBLANG_ENGLISH_US', 0x01),
('SUBLANG_ENGLISH_UK', 0x02),
('SUBLANG_ENGLISH_AUS', 0x03),
('SUBLANG_ENGLISH_CAN', 0x04),
('SUBLANG_ENGLISH_NZ', 0x05),
('SUBLANG_ENGLISH_EIRE', 0x06),
('SUBLANG_ENGLISH_SOUTH_AFRICA', 0x07),
('SUBLANG_ENGLISH_JAMAICA', 0x08),
('SUBLANG_ENGLISH_CARIBBEAN', 0x09),
('SUBLANG_ENGLISH_BELIZE', 0x0a),
('SUBLANG_ENGLISH_TRINIDAD', 0x0b),
('SUBLANG_ENGLISH_ZIMBABWE', 0x0c),
('SUBLANG_ENGLISH_PHILIPPINES', 0x0d),
('SUBLANG_FRENCH', 0x01),
('SUBLANG_FRENCH_BELGIAN', 0x02),
('SUBLANG_FRENCH_CANADIAN', 0x03),
('SUBLANG_FRENCH_SWISS', 0x04),
('SUBLANG_FRENCH_LUXEMBOURG', 0x05),
('SUBLANG_FRENCH_MONACO', 0x06),
('SUBLANG_GERMAN', 0x01),
('SUBLANG_GERMAN_SWISS', 0x02),
('SUBLANG_GERMAN_AUSTRIAN', 0x03),
('SUBLANG_GERMAN_LUXEMBOURG', 0x04),
('SUBLANG_GERMAN_LIECHTENSTEIN', 0x05),
('SUBLANG_ITALIAN', 0x01),
('SUBLANG_ITALIAN_SWISS', 0x02),
('SUBLANG_KASHMIRI_SASIA', 0x02),
('SUBLANG_KASHMIRI_INDIA', 0x02),
('SUBLANG_KOREAN', 0x01),
('SUBLANG_LITHUANIAN', 0x01),
('SUBLANG_MALAY_MALAYSIA', 0x01),
('SUBLANG_MALAY_BRUNEI_DARUSSALAM', 0x02),
('SUBLANG_NEPALI_INDIA', 0x02),
('SUBLANG_NORWEGIAN_BOKMAL', 0x01),
('SUBLANG_NORWEGIAN_NYNORSK', 0x02),
('SUBLANG_PORTUGUESE', 0x02),
('SUBLANG_PORTUGUESE_BRAZILIAN', 0x01),
('SUBLANG_SERBIAN_LATIN', 0x02),
('SUBLANG_SERBIAN_CYRILLIC', 0x03),
('SUBLANG_SPANISH', 0x01),
('SUBLANG_SPANISH_MEXICAN', 0x02),
('SUBLANG_SPANISH_MODERN', 0x03),
('SUBLANG_SPANISH_GUATEMALA', 0x04),
('SUBLANG_SPANISH_COSTA_RICA', 0x05),
('SUBLANG_SPANISH_PANAMA', 0x06),
('SUBLANG_SPANISH_DOMINICAN_REPUBLIC', 0x07),
('SUBLANG_SPANISH_VENEZUELA', 0x08),
('SUBLANG_SPANISH_COLOMBIA', 0x09),
('SUBLANG_SPANISH_PERU', 0x0a),
('SUBLANG_SPANISH_ARGENTINA', 0x0b),
('SUBLANG_SPANISH_ECUADOR', 0x0c),
('SUBLANG_SPANISH_CHILE', 0x0d),
('SUBLANG_SPANISH_URUGUAY', 0x0e),
('SUBLANG_SPANISH_PARAGUAY', 0x0f),
('SUBLANG_SPANISH_BOLIVIA', 0x10),
('SUBLANG_SPANISH_EL_SALVADOR', 0x11),
('SUBLANG_SPANISH_HONDURAS', 0x12),
('SUBLANG_SPANISH_NICARAGUA', 0x13),
('SUBLANG_SPANISH_PUERTO_RICO', 0x14),
('SUBLANG_SWEDISH', 0x01),
('SUBLANG_SWEDISH_FINLAND', 0x02),
('SUBLANG_URDU_PAKISTAN', 0x01),
('SUBLANG_URDU_INDIA', 0x02),
('SUBLANG_UZBEK_LATIN', 0x01),
('SUBLANG_UZBEK_CYRILLIC', 0x02),
('SUBLANG_DUTCH_SURINAM', 0x03),
('SUBLANG_ROMANIAN', 0x01),
('SUBLANG_ROMANIAN_MOLDAVIA', 0x02),
('SUBLANG_RUSSIAN', 0x01),
('SUBLANG_RUSSIAN_MOLDAVIA', 0x02),
('SUBLANG_CROATIAN', 0x01),
('SUBLANG_LITHUANIAN_CLASSIC', 0x02),
('SUBLANG_GAELIC', 0x01),
('SUBLANG_GAELIC_SCOTTISH', 0x02),
('SUBLANG_GAELIC_MANX', 0x03) ]
SUBLANG = dict(sublang+[(e[1], e[0]) for e in sublang])
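# A hedged aside (not in the original source): Windows packs these two values
# into a single resource language ID as MAKELANGID(lang, sublang) ==
# (sublang << 10) | lang; pefile exposes the primary and sub-language halves
# separately (see ResourceDataEntryData below).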
class UnicodeStringWrapperPostProcessor:
"""This class attemps to help the process of identifying strings
that might be plain Unicode or Pascal. A list of strings will be
wrapped on it with the hope the overlappings will help make the
decission about their type."""
def __init__(self, pe, rva_ptr):
self.pe = pe
self.rva_ptr = rva_ptr
self.string = None
def get_rva(self):
"""Get the RVA of the string."""
return self.rva_ptr
def __str__(self):
"""Return the escaped ASCII representation of the string."""
def convert_char(char):
if char in string.printable:
return char
else:
return r'\x%02x' % ord(char)
if self.string:
return ''.join([convert_char(c) for c in self.string])
return ''
    def invalidate(self):
        """Flag that this wrapper holds no known string type.
        Note that rebinding 'self' has no effect outside this method;
        callers must drop their own reference to the instance."""
        self = None
def render_pascal_16(self):
self.string = self.pe.get_string_u_at_rva(
self.rva_ptr+2,
max_length=self.__get_pascal_16_length())
def ask_pascal_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
with the possible length contained in the first word.
"""
length = self.__get_pascal_16_length()
if length == (next_rva_ptr - (self.rva_ptr+2)) / 2:
self.length = length
return True
return False
def __get_pascal_16_length(self):
return self.__get_word_value_at_rva(self.rva_ptr)
def __get_word_value_at_rva(self, rva):
try:
            data = self.pe.get_data(rva, 2)
except PEFormatError, e:
return False
if len(data)<2:
return False
return struct.unpack('<H', data)[0]
#def render_pascal_8(self):
# """"""
def ask_unicode_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
"""
if self.__get_word_value_at_rva(next_rva_ptr-2) == 0:
self.length = next_rva_ptr - self.rva_ptr
return True
return False
def render_unicode_16(self):
""""""
self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
class PEFormatError(Exception):
"""Generic PE format error exception."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Dump:
"""Convenience class for dumping the PE information."""
def __init__(self):
self.text = ''
def add_lines(self, txt, indent=0):
"""Adds a list of lines.
The list can be indented with the optional argument 'indent'.
"""
for line in txt:
self.add_line(line, indent)
def add_line(self, txt, indent=0):
"""Adds a line.
The line can be indented with the optional argument 'indent'.
"""
self.add(txt+'\n', indent)
def add(self, txt, indent=0):
"""Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'.
"""
if isinstance(txt, unicode):
s = []
for c in txt:
try:
s.append(str(c))
except UnicodeEncodeError, e:
s.append(repr(c))
txt = ''.join(s)
self.text += ' '*indent+txt
def add_header(self, txt):
"""Adds a header element."""
self.add_line('-'*10+txt+'-'*10+'\n')
def add_newline(self):
"""Adds a newline."""
self.text += '\n'
def get_text(self):
"""Get the text in its current state."""
return self.text
class Structure:
"""Prepare structure object to extract members from data.
Format is a list containing definitions for the elements
of the structure.
"""
def __init__(self, format, name=None, file_offset=None):
        # The format is forced to little endian so parsing also works
        # on big-endian, non-Intel platforms
self.__format__ = '<'
self.__keys__ = []
# self.values = {}
self.__format_length__ = 0
self.__set_format__(format[1])
self._all_zeroes = False
self.__unpacked_data_elms__ = None
self.__file_offset__ = file_offset
if name:
self.name = name
else:
self.name = format[0]
def __get_format__(self):
return self.__format__
def get_file_offset(self):
return self.__file_offset__
def set_file_offset(self, offset):
self.__file_offset__ = offset
def all_zeroes(self):
"""Returns true is the unpacked data is all zeroes."""
return self._all_zeroes
def __set_format__(self, format):
for elm in format:
if ',' in elm:
elm_type, elm_name = elm.split(',', 1)
self.__format__ += elm_type
elm_names = elm_name.split(',')
names = []
for elm_name in elm_names:
if elm_name in self.__keys__:
search_list = [x[:len(elm_name)] for x in self.__keys__]
occ_count = search_list.count(elm_name)
elm_name = elm_name+'_'+str(occ_count)
names.append(elm_name)
# Some PE header structures have unions on them, so a certain
# value might have different names, so each key has a list of
# all the possible members referring to the data.
self.__keys__.append(names)
self.__format_length__ = struct.calcsize(self.__format__)
def sizeof(self):
"""Return size of the structure."""
return self.__format_length__
def __unpack__(self, data):
if len(data)>self.__format_length__:
data = data[:self.__format_length__]
# OC Patch:
# Some malware have incorrect header lengths.
# Fail gracefully if this occurs
# Buggy malware: a29b0118af8b7408444df81701ad5a7f
#
elif len(data)<self.__format_length__:
raise PEFormatError('Data length less than expected header length.')
if data.count(chr(0)) == len(data):
self._all_zeroes = True
self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
for i in xrange(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
# self.values[key] = self.__unpacked_data_elms__[i]
setattr(self, key, self.__unpacked_data_elms__[i])
def __pack__(self):
new_values = []
for i in xrange(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
new_val = getattr(self, key)
old_val = self.__unpacked_data_elms__[i]
                # In the case of unions, exit the loop as soon as the
                # first changed value is found.
if new_val != old_val:
break
new_values.append(new_val)
return struct.pack(self.__format__, *new_values)
def __str__(self):
return '\n'.join( self.dump() )
def __repr__(self):
return '<Structure: %s>' % (' '.join( [' '.join(s.split()) for s in self.dump()] ))
def dump(self, indentation=0):
"""Returns a string representation of the structure."""
dump = []
dump.append('[%s]' % self.name)
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, int) or isinstance(val, long):
val_str = '0x%-8X' % (val)
if key == 'TimeDateStamp' or key == 'dwTimeStamp':
try:
val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
except exceptions.ValueError, e:
val_str += ' [INVALID TIME]'
else:
val_str = ''.join(filter(lambda c:c != '\0', str(val)))
dump.append('%-30s %s' % (key+':', val_str))
return dump
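# A minimal usage sketch of Structure (illustrative only, with made-up data):
#   dd = Structure(('IMAGE_DATA_DIRECTORY', ('L,VirtualAddress', 'L,Size')))
#   dd.__unpack__('\x10\x00\x00\x00\x20\x00\x00\x00')
#   assert dd.VirtualAddress == 0x10 and dd.Size == 0x20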
class SectionStructure(Structure):
"""Convenience section handling class."""
def get_data(self, start, length=None):
"""Get data chunk from a section.
        Allows querying data from the section by passing the
        addresses at which the PE file would be loaded by default.
        It is then possible to retrieve code and data by the real
        addresses they would have once loaded.
"""
offset = start - self.VirtualAddress
if length:
end = offset+length
else:
end = len(self.data)
return self.data[offset:end]
def get_rva_from_offset(self, offset):
return offset - self.PointerToRawData + self.VirtualAddress
def get_offset_from_rva(self, rva):
return (rva - self.VirtualAddress) + self.PointerToRawData
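    # Both translations above are the same constant shift in opposite
    # directions: within a section,
    # file_offset == (rva - VirtualAddress) + PointerToRawData.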
def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if not self.PointerToRawData:
# bss and other sections containing only uninitialized data must have 0
# and do not take space in the file
return False
        return self.PointerToRawData <= offset < self.PointerToRawData + self.SizeOfRawData
def contains_rva(self, rva):
"""Check whether the section contains the address provided."""
# PECOFF documentation v8 says:
# The total size of the section when loaded into memory.
# If this value is greater than SizeOfRawData, the section is zero-padded.
# This field is valid only for executable images and should be set to zero
# for object files.
if len(self.data) < self.SizeOfRawData:
size = self.Misc_VirtualSize
else:
size = max(self.SizeOfRawData, self.Misc_VirtualSize)
return self.VirtualAddress <= rva < self.VirtualAddress + size
def contains(self, rva):
#print "DEPRECATION WARNING: you should use contains_rva() instead of contains()"
return self.contains_rva(rva)
def set_data(self, data):
"""Set the data belonging to the section."""
self.data = data
def get_entropy(self):
"""Calculate and return the entropy for the section."""
return self.entropy_H( self.data )
def get_hash_sha1(self):
"""Get the SHA-1 hex-digest of the section's data."""
if sha1 is not None:
return sha1( self.data ).hexdigest()
def get_hash_sha256(self):
"""Get the SHA-256 hex-digest of the section's data."""
if sha256 is not None:
return sha256( self.data ).hexdigest()
def get_hash_sha512(self):
"""Get the SHA-512 hex-digest of the section's data."""
if sha512 is not None:
return sha512( self.data ).hexdigest()
def get_hash_md5(self):
"""Get the MD5 hex-digest of the section's data."""
if md5 is not None:
return md5( self.data ).hexdigest()
def entropy_H(self, data):
"""Calculate the entropy of a chunk of data."""
if len(data) == 0:
return 0.0
occurences = array.array('L', [0]*256)
for x in data:
occurences[ord(x)] += 1
entropy = 0
for x in occurences:
if x:
p_x = float(x) / len(data)
entropy -= p_x*math.log(p_x, 2)
return entropy
class DataContainer:
"""Generic data container."""
def __init__(self, **args):
for key, value in args.items():
setattr(self, key, value)
class ImportDescData(DataContainer):
"""Holds import descriptor information.
dll: name of the imported DLL
imports: list of imported symbols (ImportData instances)
    struct: IMAGE_IMPORT_DESCRIPTOR structure
"""
class ImportData(DataContainer):
"""Holds imported symbol's information.
ordinal: Ordinal of the symbol
name: Name of the symbol
bound: If the symbol is bound, this contains
the address.
"""
class ExportDirData(DataContainer):
"""Holds export directory information.
struct: IMAGE_EXPORT_DIRECTORY structure
symbols: list of exported symbols (ExportData instances)
"""
class ExportData(DataContainer):
"""Holds exported symbols' information.
ordinal: ordinal of the symbol
address: address of the symbol
name: name of the symbol (None if the symbol is
exported by ordinal only)
forwarder: if the symbol is forwarded it will
contain the name of the target symbol,
None otherwise.
"""
class ResourceDirData(DataContainer):
"""Holds resource directory information.
struct: IMAGE_RESOURCE_DIRECTORY structure
entries: list of entries (ResourceDirEntryData instances)
"""
class ResourceDirEntryData(DataContainer):
"""Holds resource directory entry data.
struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure
name: If the resource is identified by name this
attribute will contain the name string. None
otherwise. If identified by id, the id is
            available at 'struct.Id'
id: the id, also in struct.Id
directory: If this entry has a lower level directory
this attribute will point to the
ResourceDirData instance representing it.
    data: If this entry has no further lower directories
and points to the actual resource data, this
attribute will reference the corresponding
ResourceDataEntryData instance.
(Either of the 'directory' or 'data' attribute will exist,
but not both.)
"""
class ResourceDataEntryData(DataContainer):
"""Holds resource data entry information.
struct: IMAGE_RESOURCE_DATA_ENTRY structure
lang: Primary language ID
sublang: Sublanguage ID
"""
class DebugData(DataContainer):
"""Holds debug information.
struct: IMAGE_DEBUG_DIRECTORY structure
"""
class BaseRelocationData(DataContainer):
"""Holds base relocation information.
struct: IMAGE_BASE_RELOCATION structure
entries: list of relocation data (RelocationData instances)
"""
class RelocationData(DataContainer):
"""Holds relocation information.
type: Type of relocation
            The type string can be obtained by
RELOCATION_TYPE[type]
rva: RVA of the relocation
"""
class TlsData(DataContainer):
"""Holds TLS information.
struct: IMAGE_TLS_DIRECTORY structure
"""
class BoundImportDescData(DataContainer):
"""Holds bound import descriptor data.
    This directory entry provides information on the
    DLLs this PE file has been bound to (if bound at all).
The structure will contain the name and timestamp of the
DLL at the time of binding so that the loader can know
whether it differs from the one currently present in the
system and must, therefore, re-bind the PE's imports.
struct: IMAGE_BOUND_IMPORT_DESCRIPTOR structure
name: DLL name
entries: list of entries (BoundImportRefData instances)
the entries will exist if this DLL has forwarded
symbols. If so, the destination DLL will have an
entry in this list.
"""
class BoundImportRefData(DataContainer):
"""Holds bound import forwader reference data.
Contains the same information as the bound descriptor but
for forwarded DLLs, if any.
struct: IMAGE_BOUND_FORWARDER_REF structure
name: dll name
"""
class PE:
"""A Portable Executable representation.
This class provides access to most of the information in a PE file.
It expects to be supplied the name of the file to load or PE data
to process and an optional argument 'fast_load' (False by default)
which controls whether to load all the directories information,
which can be quite time consuming.
pe = pefile.PE('module.dll')
pe = pefile.PE(name='module.dll')
    would load 'module.dll' and process it. If the data is already
    available in a buffer, the same can be achieved with:
pe = pefile.PE(data=module_dll_data)
The "fast_load" can be set to a default by setting its value in the
module itself by means,for instance, of a "pefile.fast_load = True".
That will make all the subsequent instances not to load the
whole PE structure. The "full_load" method can be used to parse
the missing data at a later stage.
Basic headers information will be available in the attributes:
DOS_HEADER
NT_HEADERS
FILE_HEADER
OPTIONAL_HEADER
    All of them will contain among their attributes the members of the
corresponding structures as defined in WINNT.H
The raw data corresponding to the header (from the beginning of the
    file up to the start of the first section) will be available in the
instance's attribute 'header' as a string.
The sections will be available as a list in the 'sections' attribute.
Each entry will contain as attributes all the structure's members.
Directory entries will be available as attributes (if they exist):
(no other entries are processed at this point)
DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances)
DIRECTORY_ENTRY_EXPORT (ExportDirData instance)
DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance)
DIRECTORY_ENTRY_DEBUG (list of DebugData instances)
DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances)
DIRECTORY_ENTRY_TLS
    DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportDescData instances)
The following dictionary attributes provide ways of mapping different
constants. They will accept the numeric value and return the string
representation and the opposite, feed in the string and get the
numeric constant:
DIRECTORY_ENTRY
IMAGE_CHARACTERISTICS
SECTION_CHARACTERISTICS
DEBUG_TYPE
SUBSYSTEM_TYPE
MACHINE_TYPE
RELOCATION_TYPE
RESOURCE_TYPE
LANG
SUBLANG
"""
#
# Format specifications for PE structures.
#
__IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER',
('H,e_magic', 'H,e_cblp', 'H,e_cp',
'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc',
'H,e_maxalloc', 'H,e_ss', 'H,e_sp', 'H,e_csum',
'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res',
'H,e_oemid', 'H,e_oeminfo', '20s,e_res2',
'L,e_lfanew'))
__IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER',
('H,Machine', 'H,NumberOfSections',
'L,TimeDateStamp', 'L,PointerToSymbolTable',
'L,NumberOfSymbols', 'H,SizeOfOptionalHeader',
'H,Characteristics'))
__IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY',
('L,VirtualAddress', 'L,Size'))
__IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'L,SizeOfCode',
'L,SizeOfInitializedData', 'L,SizeOfUninitializedData',
'L,AddressOfEntryPoint', 'L,BaseOfCode', 'L,BaseOfData',
'L,ImageBase', 'L,SectionAlignment', 'L,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'L,Reserved1', 'L,SizeOfImage', 'L,SizeOfHeaders',
'L,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'L,SizeOfStackReserve', 'L,SizeOfStackCommit',
'L,SizeOfHeapReserve', 'L,SizeOfHeapCommit',
'L,LoaderFlags', 'L,NumberOfRvaAndSizes' ))
__IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'L,SizeOfCode',
'L,SizeOfInitializedData', 'L,SizeOfUninitializedData',
'L,AddressOfEntryPoint', 'L,BaseOfCode',
'Q,ImageBase', 'L,SectionAlignment', 'L,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'L,Reserved1', 'L,SizeOfImage', 'L,SizeOfHeaders',
'L,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit',
'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit',
'L,LoaderFlags', 'L,NumberOfRvaAndSizes' ))
__IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('L,Signature',))
__IMAGE_SECTION_HEADER_format__ = ('IMAGE_SECTION_HEADER',
('8s,Name', 'L,Misc,Misc_PhysicalAddress,Misc_VirtualSize',
'L,VirtualAddress', 'L,SizeOfRawData', 'L,PointerToRawData',
'L,PointerToRelocations', 'L,PointerToLinenumbers',
'H,NumberOfRelocations', 'H,NumberOfLinenumbers',
'L,Characteristics'))
__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR',
('L,grAttrs', 'L,szName', 'L,phmod', 'L,pIAT', 'L,pINT',
'L,pBoundIAT', 'L,pUnloadIAT', 'L,dwTimeStamp'))
__IMAGE_IMPORT_DESCRIPTOR_format__ = ('IMAGE_IMPORT_DESCRIPTOR',
('L,OriginalFirstThunk,Characteristics',
'L,TimeDateStamp', 'L,ForwarderChain', 'L,Name', 'L,FirstThunk'))
__IMAGE_EXPORT_DIRECTORY_format__ = ('IMAGE_EXPORT_DIRECTORY',
('L,Characteristics',
'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'L,Name',
'L,Base', 'L,NumberOfFunctions', 'L,NumberOfNames',
'L,AddressOfFunctions', 'L,AddressOfNames', 'L,AddressOfNameOrdinals'))
__IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY',
('L,Characteristics',
'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion',
'H,NumberOfNamedEntries', 'H,NumberOfIdEntries'))
__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY',
('L,Name',
'L,OffsetToData'))
__IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY',
('L,OffsetToData', 'L,Size', 'L,CodePage', 'L,Reserved'))
__VS_VERSIONINFO_format__ = ( 'VS_VERSIONINFO',
('H,Length', 'H,ValueLength', 'H,Type' ))
__VS_FIXEDFILEINFO_format__ = ( 'VS_FIXEDFILEINFO',
('L,Signature', 'L,StrucVersion', 'L,FileVersionMS', 'L,FileVersionLS',
'L,ProductVersionMS', 'L,ProductVersionLS', 'L,FileFlagsMask', 'L,FileFlags',
'L,FileOS', 'L,FileType', 'L,FileSubtype', 'L,FileDateMS', 'L,FileDateLS'))
__StringFileInfo_format__ = ( 'StringFileInfo',
('H,Length', 'H,ValueLength', 'H,Type' ))
__StringTable_format__ = ( 'StringTable',
('H,Length', 'H,ValueLength', 'H,Type' ))
__String_format__ = ( 'String',
('H,Length', 'H,ValueLength', 'H,Type' ))
__Var_format__ = ( 'Var', ('H,Length', 'H,ValueLength', 'H,Type' ))
__IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA',
('L,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA',
('Q,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY',
('L,Characteristics', 'L,TimeDateStamp', 'H,MajorVersion',
'H,MinorVersion', 'L,Type', 'L,SizeOfData', 'L,AddressOfRawData',
'L,PointerToRawData'))
__IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION',
('L,VirtualAddress', 'L,SizeOfBlock') )
__IMAGE_TLS_DIRECTORY_format__ = ('IMAGE_TLS_DIRECTORY',
('L,StartAddressOfRawData', 'L,EndAddressOfRawData',
'L,AddressOfIndex', 'L,AddressOfCallBacks',
'L,SizeOfZeroFill', 'L,Characteristics' ) )
__IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY',
('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData',
'Q,AddressOfIndex', 'Q,AddressOfCallBacks',
'L,SizeOfZeroFill', 'L,Characteristics' ) )
__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR',
('L,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs'))
__IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF',
('L,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved') )
def __init__(self, name=None, data=None, fast_load=None):
self.sections = []
self.__warnings = []
self.PE_TYPE = None
if not name and not data:
return
# This list will keep track of all the structures created.
# That will allow for an easy iteration through the list
# in order to save the modifications made
self.__structures__ = []
if not fast_load:
fast_load = globals()['fast_load']
self.__parse__(name, data, fast_load)
def __unpack_data__(self, format, data, file_offset):
"""Apply structure format to raw data.
        Returns an unpacked structure object if successful, None otherwise.
"""
structure = Structure(format, file_offset=file_offset)
#if len(data) < structure.sizeof():
# return None
try:
structure.__unpack__(data)
except PEFormatError, err:
self.__warnings.append(
'Corrupt header "%s" at file offset %d. Exception: %s' % (
format[0], file_offset, str(err)) )
return None
self.__structures__.append(structure)
return structure
def __parse__(self, fname, data, fast_load):
"""Parse a Portable Executable file.
Loads a PE file, parsing all its structures and making them available
through the instance's attributes.
"""
if fname:
fd = file(fname, 'rb')
self.__data__ = fd.read()
fd.close()
elif data:
self.__data__ = data
self.DOS_HEADER = self.__unpack_data__(
self.__IMAGE_DOS_HEADER_format__,
self.__data__, file_offset=0)
if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE:
raise PEFormatError('DOS Header magic not found.')
# OC Patch:
# Check for sane value in e_lfanew
#
if self.DOS_HEADER.e_lfanew > len(self.__data__):
raise PEFormatError('Invalid e_lfanew value, probably not a PE file')
nt_headers_offset = self.DOS_HEADER.e_lfanew
self.NT_HEADERS = self.__unpack_data__(
self.__IMAGE_NT_HEADERS_format__,
self.__data__[nt_headers_offset:],
file_offset = nt_headers_offset)
# We better check the signature right here, before the file screws
# around with sections:
# OC Patch:
# Some malware will cause the Signature value to not exist at all
if not self.NT_HEADERS or not self.NT_HEADERS.Signature:
raise PEFormatError('NT Headers not found.')
if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature.')
self.FILE_HEADER = self.__unpack_data__(
self.__IMAGE_FILE_HEADER_format__,
self.__data__[nt_headers_offset+4:],
file_offset = nt_headers_offset+4)
image_flags = self.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
        # Set the image's flags according to the Characteristics member
self.set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags)
optional_header_offset = \
nt_headers_offset+4+self.FILE_HEADER.sizeof()
# Note: location of sections can be controlled from PE header:
sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
self.__data__[optional_header_offset:],
file_offset = optional_header_offset)
# According to solardesigner's findings for his
# Tiny PE project, the optional header does not
# need fields beyond "Subsystem" in order to be
# loadable by the Windows loader (given that zeroes
# are acceptable values and the header is loaded
# in a zeroed memory page)
# If trying to parse a full Optional Header fails
# we try to parse it again with some 0 padding
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
# Add enough zeroes to make up for the unused fields
#
padding_length = 128
# Create padding
#
padded_data = self.__data__[optional_header_offset:] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
padded_data,
file_offset = optional_header_offset)
# Check the Magic in the OPTIONAL_HEADER and set the PE file
# type accordingly
#
if self.OPTIONAL_HEADER is not None:
if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE
elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
self.__data__[optional_header_offset:],
file_offset = optional_header_offset)
# Again, as explained above, we try to parse
# a reduced form of the Optional Header which
# is still valid despite not including all
# structure members
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69+4
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
padding_length = 128
padded_data = self.__data__[optional_header_offset:] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
padded_data,
file_offset = optional_header_offset)
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
# OC Patch:
# Die gracefully if there is no OPTIONAL_HEADER field
# 975440f5ad5e2e4a92c4d9a5f22f75c1
if self.PE_TYPE is None or self.OPTIONAL_HEADER is None:
raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file")
dll_characteristics_flags = self.retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
        # Set the Dll Characteristics flags according to the DllCharacteristics member
self.set_flags(
self.OPTIONAL_HEADER,
self.OPTIONAL_HEADER.DllCharacteristics,
dll_characteristics_flags)
self.OPTIONAL_HEADER.DATA_DIRECTORY = []
#offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader)
offset = (optional_header_offset + self.OPTIONAL_HEADER.sizeof())
self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER
self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER
        # The NumberOfRvaAndSizes is sanitized to stay within
        # reasonable limits so it can safely be cast to an int
#
if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10:
self.__warnings.append(
'Suspicious NumberOfRvaAndSizes in the Optional Header. ' +
'Normal values are never larger than 0x10, the value is: 0x%x' %
self.OPTIONAL_HEADER.NumberOfRvaAndSizes )
for i in xrange(int(0x7fffffffL & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)):
if len(self.__data__[offset:]) == 0:
break
if len(self.__data__[offset:]) < 8:
data = self.__data__[offset:]+'\0'*8
else:
data = self.__data__[offset:]
dir_entry = self.__unpack_data__(
self.__IMAGE_DATA_DIRECTORY_format__,
data,
file_offset = offset)
if dir_entry is None:
break
# Would fail if missing an entry
# 1d4937b2fa4d84ad1bce0309857e70ca offending sample
try:
dir_entry.name = DIRECTORY_ENTRY[i]
except (KeyError, AttributeError):
break
offset += dir_entry.sizeof()
self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry)
# If the offset goes outside the optional header,
# the loop is broken, regardless of how many directories
# NumberOfRvaAndSizes says there are
#
            # We assume a normally sized optional header, hence the
            # sizeof() call instead of reading SizeOfOptionalHeader.
            # Then we add a default number of directories times their
            # size; if we go beyond that, we assume the number of
            # directories is wrong and stop processing
if offset >= (optional_header_offset +
self.OPTIONAL_HEADER.sizeof() + 8*16) :
break
offset = self.parse_sections(sections_offset)
# OC Patch:
# There could be a problem if there are no raw data sections
# greater than 0
# fc91013eb72529da005110a3403541b6 example
        # Should this throw an exception if the minimum header offset
        # can't be found?
#
rawDataPointers = [
s.PointerToRawData for s in self.sections if s.PointerToRawData>0]
if len(rawDataPointers) > 0:
lowest_section_offset = min(rawDataPointers)
else:
lowest_section_offset = None
if not lowest_section_offset or lowest_section_offset<offset:
self.header = self.__data__[:offset]
else:
self.header = self.__data__[:lowest_section_offset]
# Check whether the entry point lies within a section
#
if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None:
# Check whether the entry point lies within the file
#
ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint)
if ep_offset > len(self.__data__):
self.__warnings.append(
'Possibly corrupt file. AddressOfEntryPoint lies outside the file. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
else:
self.__warnings.append(
'AddressOfEntryPoint lies outside the sections\' boundaries. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
if not fast_load:
self.parse_data_directories()
def get_warnings(self):
"""Return the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method returns the
full list.
"""
return self.__warnings
def show_warnings(self):
"""Print the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method prints the
full list to standard output.
"""
for warning in self.__warnings:
print '>', warning
def full_load(self):
"""Process the data directories.
        This method will load the data directories which might not have
        been loaded if the "fast_load" option was used.
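        A minimal usage sketch (where 'sample.exe' is a placeholder path
        to some PE file on disk):
            pe = pefile.PE('sample.exe', fast_load=True)
            pe.full_load()  # the DIRECTORY_ENTRY_* attributes are now parsed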
"""
self.parse_data_directories()
def write(self, filename=None):
"""Write the PE file.
This function will process all headers and components
of the PE file and include all changes made (by just
assigning to attributes in the PE objects) and write
the changes back to a file whose name is provided as
an argument. The filename is optional.
The data to be written to the file will be returned
as a 'str' object.
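        A hedged sketch of a typical round-trip (the file names are
        placeholders):
            pe = pefile.PE('original.exe')
            pe.OPTIONAL_HEADER.AddressOfEntryPoint = 0x1000
            pe.write('patched.exe')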
"""
file_data = list(self.__data__)
for struct in self.__structures__:
struct_data = list(struct.__pack__())
offset = struct.get_file_offset()
file_data[offset:offset+len(struct_data)] = struct_data
if hasattr(self, 'VS_VERSIONINFO'):
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
for key, entry in st_entry.entries.items():
offsets = st_entry.entries_offsets[key]
lengths = st_entry.entries_lengths[key]
if len( entry ) > lengths[1]:
uc = zip(
list(entry[:lengths[1]]), ['\0'] * lengths[1] )
l = list()
map(l.extend, uc)
file_data[
offsets[1] : offsets[1] + lengths[1]*2 ] = l
else:
uc = zip(
list(entry), ['\0'] * len(entry) )
l = list()
map(l.extend, uc)
file_data[
offsets[1] : offsets[1] + len(entry)*2 ] = l
remainder = lengths[1] - len(entry)
file_data[
offsets[1] + len(entry)*2 :
offsets[1] + lengths[1]*2 ] = [
u'\0' ] * remainder*2
new_file_data = ''.join( [ chr(ord(c)) for c in file_data ] )
if filename:
f = file(filename, 'wb+')
f.write(new_file_data)
f.close()
return new_file_data
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
        Its attributes will contain all the section information plus "data",
        a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
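        Iterating over the parsed sections typically looks like this
        (a sketch; the attribute values will vary per file):
            for section in pe.sections:
                print section.Name, hex(section.VirtualAddress),
                print section.Misc_VirtualSize, len(section.data)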
"""
self.sections = []
for i in xrange(self.FILE_HEADER.NumberOfSections):
section = SectionStructure(self.__IMAGE_SECTION_HEADER_format__)
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section.__unpack__(self.__data__[section_offset:])
self.__structures__.append(section)
if section.SizeOfRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'SizeOfRawData is larger than file.')
if section.PointerToRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData points beyond the end of the file.')
if section.Misc_VirtualSize > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualSize is extremely large > 256MiB.')
if section.VirtualAddress > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualAddress is beyond 0x10000000.')
#
            # Some packers use a non-aligned PointerToRawData in the sections,
            # which causes several common tools not to load the section data
            # properly as they blindly read from the indicated offset.
            # It seems that Windows will round the offset down to the largest
            # offset multiple of FileAlignment which is smaller than
            # PointerToRawData. The following code will do the same.
#
#alignment = self.OPTIONAL_HEADER.FileAlignment
section_data_start = section.PointerToRawData
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
(section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'Suspicious value for FileAlignment in the Optional Header. ' +
'Normally the PointerToRawData entry of the sections\' structures ' +
'is a multiple of FileAlignment, this might imply the file ' +
'is trying to confuse tools which parse this incorrectly')
section_data_end = section_data_start+section.SizeOfRawData
section.set_data(self.__data__[section_data_start:section_data_end])
section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
            # Set the section's flags according to the Characteristics member
self.set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
                    'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
'This might indicate a packed executable.')
self.sections.append(section)
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset
def retrieve_flags(self, flag_dict, flag_filter):
"""Read the flags from a dictionary and return them in a usable form.
Will return a list of (flag, value) for all flags in "flag_dict"
matching the filter "flag_filter".
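        E.g. retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_') yields
        pairs such as ('IMAGE_FILE_DLL', 0x2000).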
"""
return [(f[0], f[1]) for f in flag_dict.items() if
isinstance(f[0], str) and f[0].startswith(flag_filter)]
def set_flags(self, obj, flag_field, flags):
"""Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attritutes named after the flags provided in
"flags" and valued True/False, matching the results of applyin each
flag value from "flags" to flag_field.
"""
for flag in flags:
if flag[1] & flag_field:
setattr(obj, flag[0], True)
else:
setattr(obj, flag[0], False)
def parse_data_directories(self):
"""Parse and process the PE file's data directories."""
directory_parsing = (
('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports) )
for entry in directory_parsing:
# OC Patch:
#
try:
dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[
DIRECTORY_ENTRY[entry[0]]]
except IndexError:
break
if dir_entry.VirtualAddress:
value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
if value:
setattr(self, entry[0][6:], value)
def parse_directory_bound_imports(self, rva, size):
""""""
bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__)
bnd_descr_size = bnd_descr.sizeof()
start = rva
bound_imports = []
while True:
bnd_descr = self.__unpack_data__(
self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
if bnd_descr is None:
                # If the directory can't be parsed, silently return.
                # This directory does not necessarily have to be valid for
                # the PE file as a whole to be valid.
self.__warnings.append(
'The Bound Imports directory exists but can\'t be parsed.')
return
if bnd_descr.all_zeroes():
break
rva += bnd_descr.sizeof()
forwarder_refs = []
for idx in xrange(bnd_descr.NumberOfModuleForwarderRefs):
# Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and
# IMAGE_BOUND_FORWARDER_REF have the same size.
bnd_frwd_ref = self.__unpack_data__(
self.__IMAGE_BOUND_FORWARDER_REF_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
# OC Patch:
if not bnd_frwd_ref:
raise PEFormatError(
"IMAGE_BOUND_FORWARDER_REF cannot be read")
rva += bnd_frwd_ref.sizeof()
name_str = self.get_string_from_data(
start+bnd_frwd_ref.OffsetModuleName, self.__data__)
if not name_str:
break
forwarder_refs.append(BoundImportRefData(
struct = bnd_frwd_ref,
name = name_str))
name_str = self.get_string_from_data(
start+bnd_descr.OffsetModuleName, self.__data__)
if not name_str:
break
bound_imports.append(
BoundImportDescData(
struct = bnd_descr,
name = name_str,
entries = forwarder_refs))
return bound_imports
def parse_directory_tls(self, rva, size):
""""""
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_TLS_DIRECTORY_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_TLS_DIRECTORY64_format__
tls_struct = self.__unpack_data__(
format,
self.get_data(rva),
file_offset = self.get_offset_from_rva(rva))
if not tls_struct:
return None
return TlsData( struct = tls_struct )
def parse_relocations_directory(self, rva, size):
""""""
rlc = Structure(self.__IMAGE_BASE_RELOCATION_format__)
rlc_size = rlc.sizeof()
end = rva+size
relocations = []
while rva<end:
# OC Patch:
# Malware that has bad rva entries will cause an error.
# Just continue on after an exception
#
try:
rlc = self.__unpack_data__(
self.__IMAGE_BASE_RELOCATION_format__,
self.get_data(rva, rlc_size),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Invalid relocation information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
rlc = None
if not rlc:
break
reloc_entries = self.parse_relocations(
rva+rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock-rlc_size)
relocations.append(
BaseRelocationData(
struct = rlc,
entries = reloc_entries))
if not rlc.SizeOfBlock:
break
rva += rlc.SizeOfBlock
return relocations
def parse_relocations(self, data_rva, rva, size):
""""""
data = self.get_data(data_rva, size)
entries = []
for idx in xrange(len(data)/2):
word = struct.unpack('<H', data[idx*2:(idx+1)*2])[0]
reloc_type = (word>>12)
reloc_offset = (word&0x0fff)
entries.append(
RelocationData(
type = reloc_type,
rva = reloc_offset+rva))
return entries
def parse_debug_directory(self, rva, size):
""""""
dbg = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__)
dbg_size = dbg.sizeof()
debug = []
for idx in xrange(size/dbg_size):
try:
data = self.get_data(rva+dbg_size*idx, dbg_size)
except PEFormatError, e:
self.__warnings.append(
'Invalid debug information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
return None
dbg = self.__unpack_data__(
self.__IMAGE_DEBUG_DIRECTORY_format__,
data, file_offset = self.get_offset_from_rva(rva+dbg_size*idx))
if not dbg:
return None
debug.append(
DebugData(
struct = dbg))
return debug
def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0):
"""Parse the resources directory.
Given the rva of the resources directory, it will process all
its entries.
The root will have the corresponding member of its structure,
IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
entries in the directory.
Those entries will have, correspondingly, all the structure's
members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
"directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
representing upper layers of the tree. This one will also have
an 'entries' attribute, pointing to the 3rd, and last, level.
        Another directory with more entries. Those last entries will
        have a new attribute (either 'leaf' or 'data_entry' can be used to
        access it). This structure finally points to the resource data.
All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
are available as its attributes.
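        Walking the three levels typically looks like this (a sketch;
        guarding with hasattr(), as dump_info() does, is advisable):
            for res_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
                for res_id in res_type.directory.entries:
                    for res_lang in res_id.directory.entries:
                        data_rva = res_lang.data.struct.OffsetToData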
"""
# OC Patch:
original_rva = rva
if base_rva is None:
base_rva = rva
resources_section = self.get_section_by_rva(rva)
try:
            # If the RVA is invalid everything would blow up. Some EXEs seem
            # to be especially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Invalid resources directory. Can\'t read ' +
'directory data at RVA: 0x%x' % rva)
return None
# Get the resource directory structure, that is, the header
# of the table preceding the actual entries
#
resource_dir = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource_dir is None:
            # If the resources directory can't be parsed, silently return.
            # This directory does not necessarily have to be valid for
            # the PE file as a whole to be valid.
self.__warnings.append(
'Invalid resources directory. Can\'t parse ' +
'directory data at RVA: 0x%x' % rva)
return None
dir_entries = []
        # Advance the rva to the position immediately following the directory
# table header and pointing to the first entry in the table
#
rva += resource_dir.sizeof()
number_of_entries = (
resource_dir.NumberOfNamedEntries +
resource_dir.NumberOfIdEntries )
strings_to_postprocess = list()
for idx in xrange(number_of_entries):
res = self.parse_resource_entry(rva)
if res is None:
self.__warnings.append(
'Error parsing the resources directory, ' +
'Entry %d is invalid, RVA = 0x%x. ' %
(idx, rva) )
break
entry_name = None
entry_id = None
# If all named entries have been processed, only Id ones
# remain
if idx >= resource_dir.NumberOfNamedEntries:
entry_id = res.Name
else:
ustr_offset = base_rva+res.NameOffset
try:
#entry_name = self.get_string_u_at_rva(ustr_offset, max_length=16)
entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
strings_to_postprocess.append(entry_name)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the resources directory, ' +
'attempting to read entry name. ' +
'Can\'t read unicode string at offset 0x%x' %
(ustr_offset) )
if res.DataIsDirectory:
# OC Patch:
#
# One trick malware can do is to recursively reference
# the next directory. This causes hilarity to ensue when
# trying to parse everything correctly.
# If the original RVA given to this function is equal to
# the next one to parse, we assume that it's a trick.
# Instead of raising a PEFormatError this would skip some
# reasonable data so we just break.
#
# 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample
if original_rva == (base_rva + res.OffsetToDirectory):
break
else:
entry_directory = self.parse_resources_directory(
base_rva+res.OffsetToDirectory,
base_rva=base_rva, level = level+1)
if not entry_directory:
break
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
directory = entry_directory))
else:
struct = self.parse_resource_data_entry(
base_rva + res.OffsetToDirectory)
if struct:
entry_data = ResourceDataEntryData(
struct = struct,
lang = res.Name & 0xff,
sublang = (res.Name>>8) & 0xff)
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
data = entry_data))
else:
break
# Check if this entry contains version information
#
if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']:
if len(dir_entries)>0:
last_entry = dir_entries[-1]
rt_version_struct = None
try:
rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
except:
# Maybe a malformed directory structure...?
# Lets ignore it
pass
if rt_version_struct is not None:
self.parse_version_information(rt_version_struct)
rva += res.sizeof()
string_rvas = [s.get_rva() for s in strings_to_postprocess]
string_rvas.sort()
for idx, s in enumerate(strings_to_postprocess):
s.render_pascal_16()
resource_directory_data = ResourceDirData(
struct = resource_dir,
entries = dir_entries)
return resource_directory_data
def parse_resource_data_entry(self, rva):
"""Parse a data entry from the resources directory."""
try:
            # If the RVA is invalid everything would blow up. Some EXEs seem
            # to be especially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing a resource directory data entry, ' +
'the RVA is invalid: 0x%x' % ( rva ) )
return None
data_entry = self.__unpack_data__(
self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
return data_entry
def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, self.get_data(rva),
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFFL
resource.__pad = resource.Name & 0xFFFF0000L
resource.Id = resource.Name & 0x0000FFFFL
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
return resource
def parse_version_information(self, version_struct):
"""Parse version information structure.
        The data will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key/value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
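        Accessing the parsed version strings typically looks like this
        (a sketch; the entries vary per file):
            for fileinfo in pe.FileInfo:
                if fileinfo.Key == 'StringFileInfo':
                    for st in fileinfo.StringTable:
                        for key, value in st.entries.items():
                            print key, value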
"""
# Retrieve the data for the version info resource
#
start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
# Map the main structure and the subsequent string
#
versioninfo_struct = self.__unpack_data__(
self.__VS_VERSIONINFO_format__, raw_data,
file_offset = start_offset )
if versioninfo_struct is None:
return
ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
try:
versioninfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VS_VERSION_INFO string. Can\'t ' +
'read unicode string at offset 0x%x' % (
ustr_offset ) )
versioninfo_string = None
# If the structure does not contain the expected name, it's assumed to be invalid
#
if versioninfo_string != u'VS_VERSION_INFO':
self.__warnings.append('Invalid VS_VERSION_INFO block')
return
# Set the PE object's VS_VERSIONINFO to this one
#
self.VS_VERSIONINFO = versioninfo_struct
        # Set the Key attribute to point to the unicode string identifying the structure
#
self.VS_VERSIONINFO.Key = versioninfo_string
# Process the fixed version information, get the offset and structure
#
fixedfileinfo_offset = self.dword_align(
versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
version_struct.OffsetToData)
fixedfileinfo_struct = self.__unpack_data__(
self.__VS_FIXEDFILEINFO_format__,
raw_data[fixedfileinfo_offset:],
file_offset = start_offset+fixedfileinfo_offset )
if not fixedfileinfo_struct:
return
# Set the PE object's VS_FIXEDFILEINFO to this one
#
self.VS_FIXEDFILEINFO = fixedfileinfo_struct
# Start parsing all the StringFileInfo and VarFileInfo structures
#
# Get the first one
#
stringfileinfo_offset = self.dword_align(
fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
version_struct.OffsetToData)
original_stringfileinfo_offset = stringfileinfo_offset
# Set the PE object's attribute that will contain them all.
#
self.FileInfo = list()
while True:
# Process the StringFileInfo/VarFileInfo struct
#
stringfileinfo_struct = self.__unpack_data__(
self.__StringFileInfo_format__,
raw_data[stringfileinfo_offset:],
file_offset = start_offset+stringfileinfo_offset )
if stringfileinfo_struct is None:
self.__warnings.append(
'Error parsing StringFileInfo/VarFileInfo struct' )
return None
# Get the subsequent string defining the structure.
#
ustr_offset = ( version_struct.OffsetToData +
stringfileinfo_offset + versioninfo_struct.sizeof() )
try:
stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringFileInfo string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
# Set such string as the Key attribute
#
stringfileinfo_struct.Key = stringfileinfo_string
# Append the structure to the PE object's list
#
self.FileInfo.append(stringfileinfo_struct)
# Parse a StringFileInfo entry
#
if stringfileinfo_string == u'StringFileInfo':
if stringfileinfo_struct.Type == 1 and stringfileinfo_struct.ValueLength == 0:
stringtable_offset = self.dword_align(
stringfileinfo_offset + stringfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
stringfileinfo_struct.StringTable = list()
# Process the String Table entries
#
while True:
stringtable_struct = self.__unpack_data__(
self.__StringTable_format__,
raw_data[stringtable_offset:],
file_offset = start_offset+stringtable_offset )
if not stringtable_struct:
break
ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
stringtable_struct.sizeof() )
try:
stringtable_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
stringtable_struct.LangID = stringtable_string
stringtable_struct.entries = dict()
stringtable_struct.entries_offsets = dict()
stringtable_struct.entries_lengths = dict()
stringfileinfo_struct.StringTable.append(stringtable_struct)
entry_offset = self.dword_align(
stringtable_offset + stringtable_struct.sizeof() +
2*(len(stringtable_string)+1),
version_struct.OffsetToData)
# Process all entries in the string table
#
while entry_offset < stringtable_offset + stringtable_struct.Length:
string_struct = self.__unpack_data__(
self.__String_format__, raw_data[entry_offset:],
file_offset = start_offset+entry_offset )
if not string_struct:
break
ustr_offset = ( version_struct.OffsetToData + entry_offset +
string_struct.sizeof() )
try:
key = self.get_string_u_at_rva( ustr_offset )
key_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Key string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
value_offset = self.dword_align(
2*(len(key)+1) + entry_offset + string_struct.sizeof(),
version_struct.OffsetToData)
ustr_offset = version_struct.OffsetToData + value_offset
try:
value = self.get_string_u_at_rva( ustr_offset,
max_length = string_struct.ValueLength )
value_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Value string. ' +
'Can\'t read unicode string at offset 0x%x' % (
ustr_offset ) )
break
if string_struct.Length == 0:
entry_offset = stringtable_offset + stringtable_struct.Length
else:
entry_offset = self.dword_align(
string_struct.Length+entry_offset, version_struct.OffsetToData)
key_as_char = []
for c in key:
if ord(c)>128:
key_as_char.append('\\x%02x' %ord(c))
else:
key_as_char.append(c)
key_as_char = ''.join(key_as_char)
setattr(stringtable_struct, key_as_char, value)
stringtable_struct.entries[key] = value
stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
stringtable_struct.entries_lengths[key] = (len(key), len(value))
stringtable_offset = self.dword_align(
stringtable_struct.Length + stringtable_offset,
version_struct.OffsetToData)
if stringtable_offset >= stringfileinfo_struct.Length:
break
# Parse a VarFileInfo entry
#
elif stringfileinfo_string == u'VarFileInfo':
varfileinfo_struct = stringfileinfo_struct
varfileinfo_struct.name = 'VarFileInfo'
if varfileinfo_struct.Type == 1 and varfileinfo_struct.ValueLength == 0:
var_offset = self.dword_align(
stringfileinfo_offset + varfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
varfileinfo_struct.Var = list()
# Process all entries
#
while True:
var_struct = self.__unpack_data__(
self.__Var_format__,
raw_data[var_offset:],
file_offset = start_offset+var_offset )
if not var_struct:
break
ustr_offset = ( version_struct.OffsetToData + var_offset +
var_struct.sizeof() )
try:
var_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VarFileInfo Var string. ' +
'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
break
varfileinfo_struct.Var.append(var_struct)
varword_offset = self.dword_align(
2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
version_struct.OffsetToData)
orig_varword_offset = varword_offset
while varword_offset < orig_varword_offset + var_struct.ValueLength:
word1 = self.get_word_from_data(
raw_data[varword_offset:varword_offset+2], 0)
word2 = self.get_word_from_data(
raw_data[varword_offset+2:varword_offset+4], 0)
varword_offset += 4
var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
var_offset = self.dword_align(
var_offset+var_struct.Length, version_struct.OffsetToData)
                        # Abort if the structure reports no length, since no
                        # progress would be made and the loop would never end
                        if var_struct.Length == 0:
                            break
# Increment and align the offset
#
stringfileinfo_offset = self.dword_align(
stringfileinfo_struct.Length+stringfileinfo_offset,
version_struct.OffsetToData)
# Check if all the StringFileInfo and VarFileInfo items have been processed
#
if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
break
def parse_export_directory(self, rva, size):
"""Parse the export directory.
Given the rva of the export directory, it will process all
its entries.
        The exports will be made available as a list of ExportData
        instances in the "symbols" attribute of the returned
        ExportDirData object. Each entry carries the following elements:
        (ordinal, address, name, forwarder)
        The symbol addresses are relative, not absolute.
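        Typical iteration over the parsed result (a sketch):
            for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
                print hex(exp.address), exp.name, exp.ordinal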
"""
try:
export_dir = self.__unpack_data__(
self.__IMAGE_EXPORT_DIRECTORY_format__, self.get_data(rva),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
if not export_dir:
return
try:
address_of_names = self.get_data(
export_dir.AddressOfNames, export_dir.NumberOfNames*4)
address_of_name_ordinals = self.get_data(
export_dir.AddressOfNameOrdinals, export_dir.NumberOfNames*4)
address_of_functions = self.get_data(
export_dir.AddressOfFunctions, export_dir.NumberOfFunctions*4)
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
exports = []
for i in xrange(export_dir.NumberOfNames):
symbol_name = self.get_string_at_rva(
self.get_dword_from_data(address_of_names, i))
symbol_ordinal = self.get_word_from_data(
address_of_name_ordinals, i)
if symbol_ordinal*4<len(address_of_functions):
symbol_address = self.get_dword_from_data(
address_of_functions, symbol_ordinal)
else:
# Corrupt? a bad pointer... we assume it's all
# useless, no exports
return None
            # If the function's RVA points within the export directory
            # it will point to a string with the forwarded symbol's name
            # instead of pointing to the function's start address.
if symbol_address>=rva and symbol_address<rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+symbol_ordinal,
address = symbol_address,
name = symbol_name,
forwarder = forwarder_str))
ordinals = [exp.ordinal for exp in exports]
for idx in xrange(export_dir.NumberOfFunctions):
            if idx+export_dir.Base not in ordinals:
symbol_address = self.get_dword_from_data(
address_of_functions,
idx)
#
# Checking for forwarder again.
#
if symbol_address>=rva and symbol_address<rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+idx,
address = symbol_address,
name = None,
forwarder = forwarder_str))
return ExportDirData(
struct = export_dir,
symbols = exports)
def dword_align(self, offset, base):
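        # Round 'offset' (interpreted relative to 'base') up to the next
        # multiple of four; e.g. dword_align(5, 0) == 8 while
        # dword_align(8, 0) == 8. Version information structures are
        # DWORD-aligned relative to the start of the resource data.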
offset += base
return (offset+3) - ((offset+3)%4) - base
def parse_delay_import_directory(self, rva, size):
"""Walk and parse the delay import directory."""
import_descs = []
while True:
try:
                # If the RVA is invalid everything would blow up. Some PEs seem
                # to be especially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) )
break
import_desc = self.__unpack_data__(
self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
data, file_offset = self.get_offset_from_rva(rva) )
            # If the structure is all zeroes, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
try:
import_data = self.parse_imports(
import_desc.pINT,
import_desc.pIAT,
None)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Delay import directory. ' +
'Invalid import data at RVA: 0x%x' % ( rva ) )
break
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.szName)
if dll:
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
def parse_import_directory(self, rva, size):
"""Walk and parse the import directory."""
import_descs = []
while True:
try:
                # If the RVA is invalid everything would blow up. Some EXEs seem
                # to be especially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Import directory at RVA: 0x%x' % ( rva ) )
break
import_desc = self.__unpack_data__(
self.__IMAGE_IMPORT_DESCRIPTOR_format__,
data, file_offset = self.get_offset_from_rva(rva) )
            # If the structure is all zeroes, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
try:
import_data = self.parse_imports(
import_desc.OriginalFirstThunk,
import_desc.FirstThunk,
import_desc.ForwarderChain)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the Import directory. ' +
'Invalid Import data at RVA: 0x%x' % ( rva ) )
break
#raise excp
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.Name)
if dll:
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain):
"""Parse the imported symbols.
        Returns a list of ImportData instances describing each imported
        symbol, carrying its name or ordinal, its hint and, when the
        import is bound, its bound address.
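        The parsed imports are usually consumed like this (a sketch):
            for entry in pe.DIRECTORY_ENTRY_IMPORT:
                print entry.dll
                for imp in entry.imports:
                    print imp.name, hex(imp.address)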
"""
imported_symbols = []
imports_section = self.get_section_by_rva(first_thunk)
if not imports_section:
raise PEFormatError, 'Invalid/corrupt imports.'
# Import Lookup Table. Contains ordinals or pointers to strings.
ilt = self.get_import_table(original_first_thunk)
        # Import Address Table. May have identical content to the ILT if
        # the PE file is not bound. It will contain the addresses of the
        # imported symbols once the binary is loaded or if it is already
        # bound.
iat = self.get_import_table(first_thunk)
# OC Patch:
# Would crash if iat or ilt had None type
if not iat and not ilt:
raise PEFormatError(
'Invalid Import Table information. ' +
'Both ILT and IAT appear to be broken.')
if not iat and ilt:
table = ilt
elif iat and not ilt:
table = iat
elif ilt and ((len(ilt) and len(iat)==0) or (len(ilt) == len(iat))):
table = ilt
        elif (not ilt or len(ilt) == 0) and (iat and len(iat)):
table = iat
else:
return None
for idx in xrange(len(table)):
imp_ord = None
imp_hint = None
imp_name = None
hint_name_table_rva = None
if table[idx].AddressOfData:
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
# If imported by ordinal, we will append the ordinal number
#
if table[idx].AddressOfData & ordinal_flag:
import_by_ordinal = True
imp_ord = table[idx].AddressOfData & 0xffff
imp_name = None
else:
import_by_ordinal = False
try:
hint_name_table_rva = table[idx].AddressOfData & 0x7fffffff
data = self.get_data(hint_name_table_rva, 2)
# Get the Hint
imp_hint = self.get_word_from_data(data, 0)
imp_name = self.get_string_at_rva(table[idx].AddressOfData+2)
except PEFormatError, e:
pass
            # Thunk table entries are 4 bytes wide in PE32 and 8 in PE32+
            if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
                thunk_size = 8
            else:
                thunk_size = 4
            imp_address = first_thunk+self.OPTIONAL_HEADER.ImageBase+idx*thunk_size
if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
imp_bound = iat[idx].AddressOfData
else:
imp_bound = None
if imp_name != '' and (imp_ord or imp_name):
imported_symbols.append(
ImportData(
import_by_ordinal = import_by_ordinal,
ordinal = imp_ord,
hint = imp_hint,
name = imp_name,
bound = imp_bound,
address = imp_address,
hint_name_table_rva = hint_name_table_rva))
return imported_symbols
def get_import_table(self, rva):
table = []
        while rva:
try:
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the import table. ' +
'Invalid data at RVA: 0x%x' % ( rva ) )
return None
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_THUNK_DATA_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_THUNK_DATA64_format__
thunk_data = self.__unpack_data__(
format, data, file_offset=self.get_offset_from_rva(rva) )
if not thunk_data or thunk_data.all_zeroes():
break
rva += thunk_data.sizeof()
table.append(thunk_data)
return table
def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None):
"""Returns the data corresponding to the memory layout of the PE file.
The data includes the PE header and the sections loaded at offsets
corresponding to their relative virtual addresses. (the VirtualAddress
section header member).
Any offset in this data corresponds to the absolute memory address
ImageBase+offset.
        The optional argument 'max_virtual_address' provides a means of limiting
        which sections are processed.
        Any section whose VirtualAddress lies beyond this value will be skipped.
Normally, sections with values beyond this range are just there to confuse
tools. It's a common trick to see in packed executables.
If the 'ImageBase' optional argument is supplied, the file's relocations
will be applied to the image by calling the 'relocate_image()' method.
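        A short sketch reading the bytes at the entry point from the mapped
        layout (an offset into the returned data equals the RVA):
            mapped = pe.get_memory_mapped_image()
            ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
            first_bytes = mapped[ep:ep+16]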
"""
# Collect all sections in one code block
data = self.header
for section in self.sections:
            # Miscellaneous integrity tests.
            # Some packers will set these to bogus values to
            # make tools go nuts.
#
if section.Misc_VirtualSize == 0 or section.SizeOfRawData == 0:
continue
if section.SizeOfRawData > len(self.__data__):
continue
if section.PointerToRawData > len(self.__data__):
continue
if section.VirtualAddress >= max_virtual_address:
continue
padding_length = section.VirtualAddress - len(data)
if padding_length>0:
data += '\0'*padding_length
elif padding_length<0:
data = data[:padding_length]
data += section.data
return data
def get_data(self, rva, length=None):
"""Get data regardless of the section where it lies on.
Given a rva and the size of the chunk to retrieve, this method
will find the section where the data lies and return the data.
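        E.g. reading the 64 bytes at the entry point (a sketch):
            data = pe.get_data(pe.OPTIONAL_HEADER.AddressOfEntryPoint, 64)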
"""
s = self.get_section_by_rva(rva)
if not s:
if rva<len(self.header):
if length:
end = rva+length
else:
end = None
return self.header[rva:end]
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_data(rva, length)
def get_rva_from_offset(self, offset):
"""Get the rva corresponding to this file offset. """
s = self.get_section_by_offset(offset)
if not s:
raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
return s.get_rva_from_offset(offset)
def get_offset_from_rva(self, rva):
"""Get the file offset corresponding to this rva.
Given a rva , this method will find the section where the
data lies and return the offset within the file.
"""
s = self.get_section_by_rva(rva)
if not s:
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_offset_from_rva(rva)
def get_string_at_rva(self, rva):
"""Get an ASCII string located at the given address."""
s = self.get_section_by_rva(rva)
if not s:
if rva<len(self.header):
return self.get_string_from_data(rva, self.header)
return None
return self.get_string_from_data(rva-s.VirtualAddress, s.data)
def get_string_from_data(self, offset, data):
"""Get an ASCII string from within the data."""
# OC Patch
b = None
try:
b = data[offset]
except IndexError:
return ''
s = ''
while ord(b):
s += b
offset += 1
try:
b = data[offset]
except IndexError:
break
return s
def get_string_u_at_rva(self, rva, max_length = 2**16):
"""Get an Unicode string located at the given address."""
try:
            # If the RVA is invalid everything would blow up. Some EXEs seem
            # to be especially nasty and have an invalid RVA.
data = self.get_data(rva, 2)
except PEFormatError, e:
return None
#length = struct.unpack('<H', data)[0]
s = u''
for idx in xrange(max_length):
try:
uchr = struct.unpack('<H', self.get_data(rva+2*idx, 2))[0]
except struct.error:
break
if unichr(uchr) == u'\0':
break
s += unichr(uchr)
return s
def get_section_by_offset(self, offset):
"""Get the section containing the given file offset."""
sections = [s for s in self.sections if s.contains_offset(offset)]
if sections:
return sections[0]
return None
def get_section_by_rva(self, rva):
"""Get the section containing the given address."""
sections = [s for s in self.sections if s.contains_rva(rva)]
if sections:
return sections[0]
return None
def __str__(self):
return self.dump_info()
def print_info(self):
"""Print all the PE header information in a human readable from."""
print self.dump_info()
def dump_info(self, dump=None):
"""Dump all the PE header information into human readable string."""
if dump is None:
dump = Dump()
warnings = self.get_warnings()
if warnings:
dump.add_header('Parsing Warnings')
for warning in warnings:
dump.add_line(warning)
dump.add_newline()
dump.add_header('DOS_HEADER')
dump.add_lines(self.DOS_HEADER.dump())
dump.add_newline()
dump.add_header('NT_HEADERS')
dump.add_lines(self.NT_HEADERS.dump())
dump.add_newline()
dump.add_header('FILE_HEADER')
dump.add_lines(self.FILE_HEADER.dump())
image_flags = self.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
dump.add('Flags: ')
flags = []
for flag in image_flags:
if getattr(self.FILE_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
dump.add_header('OPTIONAL_HEADER')
dump.add_lines(self.OPTIONAL_HEADER.dump())
dll_characteristics_flags = self.retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
dump.add('DllCharacteristics: ')
flags = []
for flag in dll_characteristics_flags:
if getattr(self.OPTIONAL_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
dump.add_header('PE Sections')
section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
for section in self.sections:
dump.add_lines(section.dump())
dump.add('Flags: ')
flags = []
for flag in section_flags:
if getattr(section, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_line('Entropy: %f (Min=0.0, Max=8.0)' % section.get_entropy() )
if md5 is not None:
dump.add_line('MD5 hash: %s' % section.get_hash_md5() )
if sha1 is not None:
dump.add_line('SHA-1 hash: %s' % section.get_hash_sha1() )
if sha256 is not None:
dump.add_line('SHA-256 hash: %s' % section.get_hash_sha256() )
if sha512 is not None:
dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512() )
dump.add_newline()
if (hasattr(self, 'OPTIONAL_HEADER') and
hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
dump.add_header('Directories')
for idx in xrange(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
dump.add_lines(directory.dump())
dump.add_newline()
if hasattr(self, 'VS_VERSIONINFO'):
dump.add_header('Version Information')
dump.add_lines(self.VS_VERSIONINFO.dump())
dump.add_newline()
if hasattr(self, 'VS_FIXEDFILEINFO'):
dump.add_lines(self.VS_FIXEDFILEINFO.dump())
dump.add_newline()
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
dump.add_lines(entry.dump())
dump.add_newline()
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
[dump.add_line(' '+line) for line in st_entry.dump()]
dump.add_line(' LangID: '+st_entry.LangID)
dump.add_newline()
for str_entry in st_entry.entries.items():
dump.add_line(' '+str_entry[0]+': '+str_entry[1])
dump.add_newline()
elif hasattr(entry, 'Var'):
for var_entry in entry.Var:
if hasattr(var_entry, 'entry'):
[dump.add_line(' '+line) for line in var_entry.dump()]
dump.add_line(
' ' + var_entry.entry.keys()[0] +
': ' + var_entry.entry.values()[0])
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
dump.add_header('Exported symbols')
dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump())
dump.add_newline()
dump.add_line('%-10s %-10s %s' % ('Ordinal', 'RVA', 'Name'))
for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
                dump.add('%-10d 0x%08X %s' % (
export.ordinal, export.address, export.name))
if export.forwarder:
dump.add_line(' forwarder: %s' % export.forwarder)
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
dump.add_header('Imported symbols')
for module in self.DIRECTORY_ENTRY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, str(symbol.ordinal)))
else:
dump.add('%s.%s Hint[%s]' % (
module.dll, symbol.name, str(symbol.hint)))
if symbol.bound:
dump.add_line(' Bound: 0x%08X' % (symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
dump.add_header('Bound imports')
for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
dump.add_lines(bound_imp_desc.struct.dump())
dump.add_line('DLL: %s' % bound_imp_desc.name)
dump.add_newline()
for bound_imp_ref in bound_imp_desc.entries:
dump.add_lines(bound_imp_ref.struct.dump(), 4)
dump.add_line('DLL: %s' % bound_imp_ref.name, 4)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
dump.add_header('Delay Imported symbols')
for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, str(symbol.ordinal)))
else:
dump.add('%s.%s Hint[%s]' % (
module.dll, symbol.name, str(symbol.hint)))
if symbol.bound:
dump.add_line(' Bound: 0x%08X' % (symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
dump.add_header('Resource directory')
dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
dump.add_line('Name: [%s]' % resource_type.name, 2)
else:
dump.add_line('Id: [0x%X] (%s)' % (
resource_type.struct.Id, RESOURCE_TYPE.get(
resource_type.struct.Id, '-')),
2)
dump.add_lines(resource_type.struct.dump(), 2)
if hasattr(resource_type, 'directory'):
dump.add_lines(resource_type.directory.struct.dump(), 4)
for resource_id in resource_type.directory.entries:
if resource_id.name is not None:
dump.add_line('Name: [%s]' % resource_id.name, 6)
else:
dump.add_line('Id: [0x%X]' % resource_id.struct.Id, 6)
dump.add_lines(resource_id.struct.dump(), 6)
if hasattr(resource_id, 'directory'):
dump.add_lines(resource_id.directory.struct.dump(), 8)
for resource_lang in resource_id.directory.entries:
# dump.add_line('\\--- LANG [%d,%d][%s]' % (
# resource_lang.data.lang,
# resource_lang.data.sublang,
# LANG[resource_lang.data.lang]), 8)
dump.add_lines(resource_lang.struct.dump(), 10)
dump.add_lines(resource_lang.data.struct.dump(), 12)
dump.add_newline()
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
self.DIRECTORY_ENTRY_TLS and
self.DIRECTORY_ENTRY_TLS.struct ):
dump.add_header('TLS')
dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
dump.add_header('Debug information')
for dbg in self.DIRECTORY_ENTRY_DEBUG:
dump.add_lines(dbg.struct.dump())
try:
dump.add_line('Type: '+DEBUG_TYPE[dbg.struct.Type])
except KeyError:
dump.add_line('Type: 0x%x(Unknown)' % dbg.struct.Type)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
dump.add_header('Base relocations')
for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
dump.add_lines(base_reloc.struct.dump())
for reloc in base_reloc.entries:
try:
                        dump.add_line('0x%08X %s' % (
                            reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4)
except KeyError:
dump.add_line('0x%08X 0x%x(Unknown)' % (
reloc.rva, reloc.type), 4)
dump.add_newline()
return dump.get_text()
# OC Patch
def get_physical_by_rva(self, rva):
"""Gets the physical address in the PE file from an RVA value."""
try:
return self.get_offset_from_rva(rva)
except Exception:
return None
##
# Double-Word get/set
##
def get_data_from_dword(self, dword):
"""Return a four byte string representing the double word value. (little endian)."""
return struct.pack('<L', dword)
def get_dword_from_data(self, data, offset):
"""Convert four bytes of data to a double word (little endian)
'offset' is assumed to index into a dword array. So setting it to
        N will return a dword out of the data starting at offset N*4.
Returns None if the data can't be turned into a double word.
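        For instance (a sketch), with data = struct.pack('<LL', 1, 2),
        get_dword_from_data(data, 1) returns 2, the second little-endian
        dword.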
"""
if (offset+1)*4 > len(data):
return None
return struct.unpack('<L', data[offset*4:(offset+1)*4])[0]
def get_dword_at_rva(self, rva):
"""Return the double word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_dword_from_data(self.get_data(rva)[:4], 0)
except PEFormatError:
return None
def get_dword_from_offset(self, offset):
"""Return the double word value at the given file offset. (little endian)"""
if offset+4 > len(self.__data__):
return None
return self.get_dword_from_data(self.__data__[offset:offset+4], 0)
def set_dword_at_rva(self, rva, dword):
"""Set the double word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword))
def set_dword_at_offset(self, offset, dword):
"""Set the double word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
##
# Word get/set
##
def get_data_from_word(self, word):
"""Return a two byte string representing the word value. (little endian)."""
return struct.pack('<H', word)
def get_word_from_data(self, data, offset):
"""Convert two bytes of data to a word (little endian)
        'offset' is assumed to index into a word array. So setting it to
        N will return a word out of the data starting at offset N*2.
Returns None if the data can't be turned into a word.
"""
if (offset+1)*2 > len(data):
return None
return struct.unpack('<H', data[offset*2:(offset+1)*2])[0]
def get_word_at_rva(self, rva):
"""Return the word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_word_from_data(self.get_data(rva)[:2], 0)
except PEFormatError:
return None
def get_word_from_offset(self, offset):
"""Return the word value at the given file offset. (little endian)"""
if offset+2 > len(self.__data__):
return None
return self.get_word_from_data(self.__data__[offset:offset+2], 0)
def set_word_at_rva(self, rva, word):
"""Set the word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_word(word))
def set_word_at_offset(self, offset, word):
"""Set the word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
##
# Quad-Word get/set
##
def get_data_from_qword(self, word):
"""Return a eight byte string representing the quad-word value. (little endian)."""
return struct.pack('<Q', word)
def get_qword_from_data(self, data, offset):
"""Convert eight bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
N will return a dword out of the data sarting at offset N*8.
Returns None if the data can't be turned into a quad word.
"""
if (offset+1)*8 > len(data):
return None
return struct.unpack('<Q', data[offset*8:(offset+1)*8])[0]
def get_qword_at_rva(self, rva):
"""Return the quad-word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_qword_from_data(self.get_data(rva)[:8], 0)
except PEFormatError:
return None
def get_qword_from_offset(self, offset):
"""Return the quad-word value at the given file offset. (little endian)"""
if offset+8 > len(self.__data__):
return None
return self.get_qword_from_data(self.__data__[offset:offset+8], 0)
def set_qword_at_rva(self, rva, qword):
"""Set the quad-word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
def set_qword_at_offset(self, offset, qword):
"""Set the quad-word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword))
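# Illustrative sketch (not part of pefile): the helpers above are thin wrappers
# over struct with explicit little-endian format codes. A standalone round-trip:
def _roundtrip_demo():
    data = struct.pack('<Q', 0x1122334455667788)  # what get_data_from_qword does
    assert struct.unpack('<H', data[0:2])[0] == 0x7788  # low word
    assert struct.unpack('<L', data[0:4])[0] == 0x55667788  # low dword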
##
# Set bytes
##
def set_bytes_at_rva(self, rva, data):
"""Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
offset = self.get_physical_by_rva(rva)
if not offset:
return False
return self.set_bytes_at_offset(offset, data)
def set_bytes_at_offset(self, offset, data):
"""Overwrite the bytes at the given file offset with the given string.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
if not isinstance(data, str):
raise TypeError('data should be of type: str')
if offset >= 0 and offset < len(self.__data__):
self.__data__ = ( self.__data__[:offset] +
data +
self.__data__[offset+len(data):] )
else:
return False
# Refresh the section's data with the modified information
#
for section in self.sections:
section_data_start = section.PointerToRawData
section_data_end = section_data_start+section.SizeOfRawData
section.data = self.__data__[section_data_start:section_data_end]
return True
def relocate_image(self, new_ImageBase):
"""Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
The resulting image can then be retrieved through the method
get_memory_mapped_image(), in order to get something that more closely
matches what would be found in memory once the Windows loader has
finished its work.
"""
relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
for reloc in self.DIRECTORY_ENTRY_BASERELOC:
virtual_address = reloc.struct.VirtualAddress
size_of_block = reloc.struct.SizeOfBlock
# We iterate with an index because if the relocation is of type
# IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
# at once and skip it for the next iteration
#
entry_idx = 0
while entry_idx<len(reloc.entries):
entry = reloc.entries[entry_idx]
entry_idx += 1
if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
# Nothing to do for this type of relocation
pass
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
# Fix the high 16bits of a relocation
#
# Add high 16bits of relocation_difference to the
# 16bit value at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + (relocation_difference>>16) ) & 0xffff )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
# Fix the low 16bits of a relocation
#
# Add low 16 bits of relocation_difference to the 16bit value
# at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
# Handle all high and low parts of a 32bit relocation
#
# Add relocation_difference to the value at RVA=entry.rva
self.set_dword_at_rva(
entry.rva,
self.get_dword_at_rva(entry.rva)+relocation_difference)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
# Fix the high 16bits of a relocation and adjust
#
# Add high 16bits of relocation_difference to the 32bit value
# composed from the (16bit value at RVA=entry.rva)<<16 plus
# the 16bit value at the next relocation entry.
#
# If the next entry is beyond the array's limits,
# abort... the table is corrupt
#
if entry_idx == len(reloc.entries):
break
next_entry = reloc.entries[entry_idx]
entry_idx += 1
self.set_word_at_rva( entry.rva,
((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
relocation_difference & 0xffff0000) >> 16 )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
# Apply the difference to the 64bit value at the offset
# RVA=entry.rva
self.set_qword_at_rva(
entry.rva,
self.get_qword_at_rva(entry.rva) + relocation_difference)
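# Hedged standalone sketch (not part of pefile): the IMAGE_REL_BASED_HIGHLOW
# case above boils down to "add the image-base delta to a 32-bit little-endian
# value in place". Over a plain byte string:
def _apply_highlow(data, offset, delta):
    value = struct.unpack('<L', data[offset:offset + 4])[0]
    patched = struct.pack('<L', (value + delta) & 0xffffffff)
    return data[:offset] + patched + data[offset + 4:]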
def verify_checksum(self):
return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum()
def generate_checksum(self):
# Get the offset to the CheckSum field in the OptionalHeader
#
checksum_offset = self.OPTIONAL_HEADER.__file_offset__ + 0x40 # 64
checksum = 0
for i in range( len(self.__data__) / 4 ):
# Skip the checksum field
#
if i == checksum_offset / 4:
continue
dword = struct.unpack('<L', self.__data__[ i*4 : i*4+4 ])[0]
checksum = (checksum & 0xffffffff) + dword + (checksum>>32)
if checksum > 2**32:
checksum = (checksum & 0xffffffff) + (checksum >> 32)
checksum = (checksum & 0xffff) + (checksum >> 16)
checksum = (checksum) + (checksum >> 16)
checksum = checksum & 0xffff
return checksum + len(self.__data__)
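# Hedged standalone sketch (not part of pefile) mirroring generate_checksum()
# above for a raw buffer; assumes len(data) is a multiple of four and that
# checksum_offset is dword aligned:
def _pe_checksum(data, checksum_offset):
    checksum = 0
    for i in range(len(data) // 4):
        if i == checksum_offset // 4:
            continue  # never checksum the CheckSum field itself
        dword = struct.unpack('<L', data[i * 4:i * 4 + 4])[0]
        checksum = (checksum & 0xffffffff) + dword + (checksum >> 32)
        if checksum > 2 ** 32:
            checksum = (checksum & 0xffffffff) + (checksum >> 32)
    checksum = (checksum & 0xffff) + (checksum >> 16)
    checksum = checksum + (checksum >> 16)
    return (checksum & 0xffff) + len(data)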
|
ns950/calibre
|
refs/heads/master
|
src/calibre/ebooks/pdf/reflow.py
|
14
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os
from lxml import etree
class Font(object):
def __init__(self, spec):
self.id = spec.get('id')
self.size = float(spec.get('size'))
self.color = spec.get('color')
self.family = spec.get('family')
class Element(object):
def __init__(self):
self.starts_block = None
self.block_style = None
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
class Image(Element):
def __init__(self, img, opts, log, idc):
Element.__init__(self)
self.opts, self.log = opts, log
self.id = idc.next()
self.top, self.left, self.width, self.height, self.iwidth, self.iheight = \
map(float, map(img.get, ('top', 'left', 'rwidth', 'rheight', 'iwidth',
'iheight')))
self.src = img.get('src')
self.bottom = self.top + self.height
self.right = self.left + self.width
def to_html(self):
return '<img src="%s" width="%dpx" height="%dpx"/>' % \
(self.src, int(self.width), int(self.height))
def dump(self, f):
f.write(self.to_html())
f.write('\n')
class Text(Element):
def __init__(self, text, font_map, opts, log, idc):
Element.__init__(self)
self.id = idc.next()
self.opts, self.log = opts, log
self.font_map = font_map
self.top, self.left, self.width, self.height = map(float, map(text.get,
('top', 'left', 'width', 'height')))
self.bottom = self.top + self.height
self.right = self.left + self.width
self.font = self.font_map[text.get('font')]
self.font_size = self.font.size
self.color = self.font.color
self.font_family = self.font.family
text.tail = ''
self.text_as_string = etree.tostring(text, method='text',
encoding=unicode)
self.raw = text.text if text.text else u''
for x in text.iterchildren():
self.raw += etree.tostring(x, method='xml', encoding=unicode)
self.average_character_width = self.width/len(self.text_as_string)
def coalesce(self, other, page_number):
if self.opts.verbose > 2:
self.log.debug('Coalescing %r with %r on page %d'%(self.text_as_string,
other.text_as_string, page_number))
self.top = min(self.top, other.top)
self.right = other.right
self.width = self.right - self.left
self.bottom = max(self.bottom, other.bottom)
self.height = self.bottom - self.top
self.font_size = max(self.font_size, other.font_size)
self.font = other.font if self.font_size == other.font_size else self.font
self.text_as_string += other.text_as_string
self.raw += other.raw
self.average_character_width = (self.average_character_width +
other.average_character_width)/2.0
def to_html(self):
return self.raw
def dump(self, f):
f.write(self.to_html().encode('utf-8'))
f.write('\n')
class FontSizeStats(dict):
def __init__(self, stats):
total = float(sum(stats.values()))
self.most_common_size, self.chars_at_most_common_size = -1, 0
for sz, chars in stats.items():
if chars >= self.chars_at_most_common_size:
self.most_common_size, self.chars_at_most_common_size = sz, chars
self[sz] = chars/total
class Interval(object):
def __init__(self, left, right):
self.left, self.right = left, right
self.width = right - left
def intersection(self, other):
left = max(self.left, other.left)
right = min(self.right, other.right)
return Interval(left, right)
def centered_in(self, parent):
left = abs(self.left - parent.left)
right = abs(self.right - parent.right)
return abs(left-right) < 3
def __nonzero__(self):
return self.width > 0
def __eq__(self, other):
return self.left == other.left and self.right == other.right
def __hash__(self):
return hash('(%f,%f)' % (self.left, self.right))
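# Illustrative sketch (not part of calibre): Interval drives both the column
# overlap test in Region.contains and the image-centering test in linearize.
def _interval_demo():
    a, b = Interval(0, 10), Interval(6, 14)
    assert a.intersection(b).width == 4       # overlapping span
    assert Interval(2.5, 8.0).centered_in(a)  # margins differ by less than 3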
class Column(object):
# A column contains an element if the element bulges out to
# the left or the right by at most HFUZZ * (column width).
HFUZZ = 0.2
def __init__(self):
self.left = self.right = self.top = self.bottom = 0
self.width = self.height = 0
self.elements = []
self.average_line_separation = 0
def add(self, elem):
if elem in self.elements: return
self.elements.append(elem)
self._post_add()
def prepend(self, elem):
if elem in self.elements: return
self.elements.insert(0, elem)
self._post_add()
def _post_add(self):
self.elements.sort(cmp=lambda x,y:cmp(x.bottom,y.bottom))
self.top = self.elements[0].top
self.bottom = self.elements[-1].bottom
self.left, self.right = sys.maxint, 0
for x in self:
self.left = min(self.left, x.left)
self.right = max(self.right, x.right)
self.width, self.height = self.right-self.left, self.bottom-self.top
def __iter__(self):
for x in self.elements:
yield x
def __len__(self):
return len(self.elements)
def contains(self, elem):
return elem.left > self.left - self.HFUZZ*self.width and \
elem.right < self.right + self.HFUZZ*self.width
def collect_stats(self):
if len(self.elements) > 1:
gaps = [self.elements[i+1].top - self.elements[i].bottom for i in
range(0, len(self.elements)-1)]
self.average_line_separation = sum(gaps)/len(gaps)
for i, elem in enumerate(self.elements):
left_margin = elem.left - self.left
elem.indent_fraction = left_margin/self.width
elem.width_fraction = elem.width/self.width
if i == 0:
elem.top_gap_ratio = None
else:
elem.top_gap_ratio = (self.elements[i-1].bottom -
elem.top)/self.average_line_separation
def previous_element(self, idx):
if idx == 0:
return None
return self.elements[idx-1]
def dump(self, f, num):
f.write('******** Column %d\n\n'%num)
for elem in self.elements:
elem.dump(f)
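# Hedged sketch (not part of calibre): how Column.contains() applies HFUZZ.
# The stub element below bulges 15px past the left edge of a 100px-wide
# column, which is within the 20px (HFUZZ * width) tolerance.
def _column_demo():
    from collections import namedtuple
    Stub = namedtuple('Stub', 'id top bottom left right')
    col = Column()
    col.add(Stub(1, 0, 10, 100, 200))  # column bounds become 100..200
    return col.contains(Stub(2, 12, 22, 85, 205))  # True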
class Box(list):
def __init__(self, type='p'):
self.tag = type
def to_html(self):
ans = ['<%s>'%self.tag]
for elem in self:
if isinstance(elem, int):
ans.append('<a name="page_%d"/>'%elem)
else:
ans.append(elem.to_html()+' ')
ans.append('</%s>'%self.tag)
return ans
class ImageBox(Box):
def __init__(self, img):
Box.__init__(self)
self.img = img
def to_html(self):
ans = ['<div style="text-align:center">']
ans.append(self.img.to_html())
if len(self) > 0:
ans.append('<br/>')
for elem in self:
if isinstance(elem, int):
ans.append('<a name="page_%d"/>'%elem)
else:
ans.append(elem.to_html()+' ')
ans.append('</div>')
return ans
class Region(object):
def __init__(self, opts, log):
self.opts, self.log = opts, log
self.columns = []
self.top = self.bottom = self.left = self.right = self.width = self.height = 0
def add(self, columns):
if not self.columns:
for x in sorted(columns, cmp=lambda x,y: cmp(x.left, y.left)):
self.columns.append(x)
else:
for i in range(len(columns)):
for elem in columns[i]:
self.columns[i].add(elem)
def contains(self, columns):
# TODO: handle unbalanced columns
if not self.columns:
return True
if len(columns) != len(self.columns):
return False
for i in range(len(columns)):
c1, c2 = self.columns[i], columns[i]
x1 = Interval(c1.left, c1.right)
x2 = Interval(c2.left, c2.right)
intersection = x1.intersection(x2)
base = min(x1.width, x2.width)
if intersection.width/base < 0.6:
return False
return True
@property
def is_empty(self):
return len(self.columns) == 0
@property
def line_count(self):
max_lines = 0
for c in self.columns:
max_lines = max(max_lines, len(c))
return max_lines
@property
def is_small(self):
return self.line_count < 3
def absorb(self, singleton):
def most_suitable_column(elem):
mc, mw = None, 0
for c in self.columns:
i = Interval(c.left, c.right)
e = Interval(elem.left, elem.right)
w = i.intersection(e).width
if w > mw:
mc, mw = c, w
if mc is None:
self.log.warn('No suitable column for singleton',
elem.to_html())
mc = self.columns[0]
return mc
for c in singleton.columns:
for elem in c:
col = most_suitable_column(elem)
if self.opts.verbose > 3:
idx = self.columns.index(col)
self.log.debug(u'Absorbing singleton %s into column'%elem.to_html(),
idx)
col.add(elem)
def collect_stats(self):
for column in self.columns:
column.collect_stats()
self.average_line_separation = sum([x.average_line_separation for x in
self.columns])/float(len(self.columns))
def __iter__(self):
for x in self.columns:
yield x
def absorb_regions(self, regions, at):
for region in regions:
self.absorb_region(region, at)
def absorb_region(self, region, at):
if len(region.columns) <= len(self.columns):
for i in range(len(region.columns)):
src, dest = region.columns[i], self.columns[i]
if at != 'bottom':
src = reversed(list(iter(src)))
for elem in src:
func = dest.add if at == 'bottom' else dest.prepend
func(elem)
else:
col_map = {}
for i, col in enumerate(region.columns):
max_overlap, max_overlap_index = 0, 0
for j, dcol in enumerate(self.columns):
sint = Interval(col.left, col.right)
dint = Interval(dcol.left, dcol.right)
width = sint.intersection(dint).width
if width > max_overlap:
max_overlap = width
max_overlap_index = j
col_map[i] = max_overlap_index
lines = max(map(len, region.columns))
if at == 'bottom':
lines = range(lines)
else:
lines = range(lines-1, -1, -1)
for i in lines:
for j, src in enumerate(region.columns):
dest = self.columns[col_map[j]]
if i < len(src):
func = dest.add if at == 'bottom' else dest.prepend
func(src.elements[i])
def dump(self, f):
f.write('############################################################\n')
f.write('########## Region (%d columns) ###############\n'%len(self.columns))
f.write('############################################################\n\n')
for i, col in enumerate(self.columns):
col.dump(f, i)
def linearize(self):
self.elements = []
for x in self.columns:
self.elements.extend(x)
self.boxes = [Box()]
for i, elem in enumerate(self.elements):
if isinstance(elem, Image):
self.boxes.append(ImageBox(elem))
img = Interval(elem.left, elem.right)
for j in range(i+1, len(self.elements)):
t = self.elements[j]
if not isinstance(t, Text):
break
ti = Interval(t.left, t.right)
if not ti.centered_in(img):
break
self.boxes[-1].append(t)
self.boxes.append(Box())
else:
is_indented = False
if i+1 < len(self.elements):
indent_diff = elem.indent_fraction - \
self.elements[i+1].indent_fraction
if indent_diff > 0.05:
is_indented = True
if elem.top_gap_ratio > 1.2 or is_indented:
self.boxes.append(Box())
self.boxes[-1].append(elem)
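# Illustrative distillation (not part of calibre) of the paragraph-split rule
# in linearize() above: a new Box is opened on a large vertical gap or on a
# noticeable indent relative to the following line.
def _starts_new_paragraph(elem_indent, next_indent, top_gap_ratio):
    is_indented = (elem_indent - next_indent) > 0.05
    return top_gap_ratio > 1.2 or is_indented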
class Page(object):
# Maximum fraction of a character width by which two strings can be
# separated while still being considered part of the same text fragment
COALESCE_FACTOR = 0.5
# Maximum fraction of the text height by which two strings' bottoms can
# differ while still being considered part of the same text fragment
LINE_FACTOR = 0.4
# Multiplies the average line height when determining row height
# of a particular element to detect columns.
YFUZZ = 1.5
def __init__(self, page, font_map, opts, log, idc):
self.opts, self.log = opts, log
self.font_map = font_map
self.number = int(page.get('number'))
self.width, self.height = map(float, map(page.get,
('width', 'height')))
self.id = 'page%d'%self.number
self.texts = []
self.left_margin, self.right_margin = self.width, 0
for text in page.xpath('descendant::text'):
self.texts.append(Text(text, self.font_map, self.opts, self.log, idc))
text = self.texts[-1]
self.left_margin = min(text.left, self.left_margin)
self.right_margin = max(text.right, self.right_margin)
self.textwidth = self.right_margin - self.left_margin
self.font_size_stats = {}
self.average_text_height = 0
for t in self.texts:
if t.font_size not in self.font_size_stats:
self.font_size_stats[t.font_size] = 0
self.font_size_stats[t.font_size] += len(t.text_as_string)
self.average_text_height += t.height
if len(self.texts):
self.average_text_height /= len(self.texts)
self.font_size_stats = FontSizeStats(self.font_size_stats)
self.coalesce_fragments()
self.elements = list(self.texts)
for img in page.xpath('descendant::img'):
self.elements.append(Image(img, self.opts, self.log, idc))
self.elements.sort(cmp=lambda x,y:cmp(x.top, y.top))
def coalesce_fragments(self):
def find_match(frag):
for t in self.texts:
hdelta = t.left - frag.right
hoverlap = self.COALESCE_FACTOR * frag.average_character_width
if t is not frag and hdelta > -hoverlap and \
hdelta < hoverlap and \
abs(t.bottom - frag.bottom) < self.LINE_FACTOR*frag.height:
return t
match_found = True
while match_found:
match_found, match = False, None
for frag in self.texts:
match = find_match(frag)
if match is not None:
match_found = True
frag.coalesce(match, self.number)
break
if match is not None:
self.texts.remove(match)
def first_pass(self):
'Sort page into regions and columns'
self.regions = []
if not self.elements:
return
for i, x in enumerate(self.elements):
x.idx = i
current_region = Region(self.opts, self.log)
processed = set([])
for x in self.elements:
if x in processed: continue
elems = set(self.find_elements_in_row_of(x))
columns = self.sort_into_columns(x, elems)
processed.update(elems)
if not current_region.contains(columns):
self.regions.append(current_region)
current_region = Region(self.opts, self.log)
current_region.add(columns)
if not current_region.is_empty:
self.regions.append(current_region)
if self.opts.verbose > 2:
self.debug_dir = 'page-%d'%self.number
os.mkdir(self.debug_dir)
self.dump_regions('pre-coalesce')
self.coalesce_regions()
self.dump_regions('post-coalesce')
def dump_regions(self, fname):
fname = 'regions-'+fname+'.txt'
with open(os.path.join(self.debug_dir, fname), 'wb') as f:
f.write('Page #%d\n\n'%self.number)
for region in self.regions:
region.dump(f)
def coalesce_regions(self):
# find contiguous sets of small regions
# absorb into a neighboring region (prefer the one with number of cols
# closer to the avg number of cols in the set, if equal use larger
# region)
found = True
absorbed = set([])
processed = set([])
while found:
found = False
for i, region in enumerate(self.regions):
if region in absorbed:
continue
if region.is_small and region not in processed:
found = True
processed.add(region)
regions = [region]
end = i+1
for j in range(i+1, len(self.regions)):
end = j
if self.regions[j].is_small:
regions.append(self.regions[j])
else:
break
prev_region = None if i == 0 else i-1
next_region = end if end < len(self.regions) and self.regions[end] not in regions else None
absorb_at = 'bottom'
if prev_region is None and next_region is not None:
absorb_into = next_region
absorb_at = 'top'
elif next_region is None and prev_region is not None:
absorb_into = prev_region
elif prev_region is None and next_region is None:
if len(regions) > 1:
absorb_into = i
regions = regions[1:]
else:
absorb_into = None
else:
absorb_into = prev_region
if self.regions[next_region].line_count >= \
self.regions[prev_region].line_count:
avg_column_count = sum([len(r.columns) for r in
regions])/float(len(regions))
if self.regions[next_region].line_count > \
self.regions[prev_region].line_count \
or abs(avg_column_count -
len(self.regions[prev_region].columns)) \
> abs(avg_column_count -
len(self.regions[next_region].columns)):
absorb_into = next_region
absorb_at = 'top'
if absorb_into is not None:
self.regions[absorb_into].absorb_regions(regions, absorb_at)
absorbed.update(regions)
for region in absorbed:
self.regions.remove(region)
def sort_into_columns(self, elem, neighbors):
neighbors.add(elem)
neighbors = sorted(neighbors, cmp=lambda x,y:cmp(x.left, y.left))
if self.opts.verbose > 3:
self.log.debug('Neighbors:', [x.to_html() for x in neighbors])
columns = [Column()]
columns[0].add(elem)
for x in neighbors:
added = False
for c in columns:
if c.contains(x):
c.add(x)
added = True
break
if not added:
columns.append(Column())
columns[-1].add(x)
columns.sort(cmp=lambda x,y:cmp(x.left, y.left))
return columns
def find_elements_in_row_of(self, x):
interval = Interval(x.top,
x.top + self.YFUZZ*(self.average_text_height))
h_interval = Interval(x.left, x.right)
for y in self.elements[x.idx:x.idx+15]:
if y is not x:
y_interval = Interval(y.top, y.bottom)
x_interval = Interval(y.left, y.right)
if interval.intersection(y_interval).width > \
0.5*self.average_text_height and \
x_interval.intersection(h_interval).width <= 0:
yield y
def second_pass(self):
'Locate paragraph boundaries in each column'
for region in self.regions:
region.collect_stats()
region.linearize()
class PDFDocument(object):
def __init__(self, xml, opts, log):
self.opts, self.log = opts, log
parser = etree.XMLParser(recover=True)
self.root = etree.fromstring(xml, parser=parser)
idc = iter(xrange(sys.maxint))
self.fonts = []
self.font_map = {}
for spec in self.root.xpath('//font'):
self.fonts.append(Font(spec))
self.font_map[self.fonts[-1].id] = self.fonts[-1]
self.pages = []
self.page_map = {}
for page in self.root.xpath('//page'):
page = Page(page, self.font_map, opts, log, idc)
self.page_map[page.id] = page
self.pages.append(page)
self.collect_font_statistics()
for page in self.pages:
page.document_font_stats = self.font_size_stats
page.first_pass()
page.second_pass()
self.linearize()
self.render()
def collect_font_statistics(self):
self.font_size_stats = {}
for p in self.pages:
for sz in p.font_size_stats:
chars = p.font_size_stats[sz]
if sz not in self.font_size_stats:
self.font_size_stats[sz] = 0
self.font_size_stats[sz] += chars
self.font_size_stats = FontSizeStats(self.font_size_stats)
def linearize(self):
self.elements = []
last_region = last_block = None
for page in self.pages:
page_number_inserted = False
for region in page.regions:
merge_first_block = last_region is not None and \
len(last_region.columns) == len(region.columns) and \
not hasattr(last_block, 'img')
for i, block in enumerate(region.boxes):
if merge_first_block:
merge_first_block = False
if not page_number_inserted:
last_block.append(page.number)
page_number_inserted = True
for elem in block:
last_block.append(elem)
else:
if not page_number_inserted:
block.insert(0, page.number)
page_number_inserted = True
self.elements.append(block)
last_block = block
last_region = region
def render(self):
html = ['<?xml version="1.0" encoding="UTF-8"?>',
'<html xmlns="http://www.w3.org/1999/xhtml">', '<head>',
'<title>PDF Reflow conversion</title>', '</head>', '<body>',
'<div>']
for elem in self.elements:
html.extend(elem.to_html())
html += ['</body>', '</html>']
raw = (u'\n'.join(html)).replace('</strong><strong>', '')
with open('index.html', 'wb') as f:
f.write(raw.encode('utf-8'))
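# Hedged usage sketch (not part of calibre): PDFDocument consumes pdftohtml-style
# XML and writes index.html as a side effect of render(). The names `opts` and
# `log` stand in for calibre's option container and logger and are assumptions.
def _convert_demo(xml_bytes, opts, log):
    PDFDocument(xml_bytes, opts, log)  # parses, reflows, writes index.html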
|
ehashman/oh-mainline
|
refs/heads/master
|
vendor/packages/scrapy/scrapy/utils/testproc.py
|
19
|
import sys
import os
from twisted.internet import reactor, defer, protocol
class ProcessTest(object):
command = None
prefix = [sys.executable, '-m', 'scrapy.cmdline']
cwd = os.getcwd() # trial chdirs to temp dir
def execute(self, args, check_code=True, settings='missing'):
env = os.environ.copy()
env['SCRAPY_SETTINGS_MODULE'] = settings
cmd = self.prefix + [self.command] + list(args)
pp = TestProcessProtocol()
pp.deferred.addBoth(self._process_finished, cmd, check_code)
reactor.spawnProcess(pp, cmd[0], cmd, env=env, path=self.cwd)
return pp.deferred
def _process_finished(self, pp, cmd, check_code):
if pp.exitcode and check_code:
msg = "process %s exit with code %d" % (cmd, pp.exitcode)
msg += "\n>>> stdout <<<\n%s" % pp.out
msg += "\n"
msg += "\n>>> stderr <<<\n%s" % pp.err
raise RuntimeError(msg)
return pp.exitcode, pp.out, pp.err
class TestProcessProtocol(protocol.ProcessProtocol):
def __init__(self):
self.deferred = defer.Deferred()
self.out = ''
self.err = ''
self.exitcode = None
def outReceived(self, data):
self.out += data
def errReceived(self, data):
self.err += data
def processEnded(self, status):
self.exitcode = status.value.exitCode
self.deferred.callback(self)
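# Hedged usage sketch (not part of scrapy): ProcessTest is meant to be mixed
# into a twisted.trial TestCase, with `command` naming the scrapy subcommand.
from twisted.trial import unittest
class _VersionTest(ProcessTest, unittest.TestCase):
    command = 'version'
    @defer.inlineCallbacks
    def test_output(self):
        _, out, _ = yield self.execute([])
        self.assertIn('Scrapy', out)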
|
adragomir/dcos-commons
|
refs/heads/master
|
frameworks/kafka/tests/test_shakedown_basics.py
|
1
|
import pytest
import sdk_install as install
import sdk_tasks as tasks
import sdk_spin as spin
import sdk_cmd as command
import sdk_utils as utils
import dcos
import dcos.config
import dcos.http
import urllib.parse
from tests.test_utils import (
DEFAULT_PARTITION_COUNT,
DEFAULT_REPLICATION_FACTOR,
PACKAGE_NAME,
SERVICE_NAME,
DEFAULT_BROKER_COUNT,
DEFAULT_TOPIC_NAME,
EPHEMERAL_TOPIC_NAME,
DEFAULT_POD_TYPE,
DEFAULT_PHASE_NAME,
DEFAULT_PLAN_NAME,
DEFAULT_TASK_NAME,
service_cli
)
def setup_module(module):
install.uninstall(SERVICE_NAME, PACKAGE_NAME)
utils.gc_frameworks()
install.install(PACKAGE_NAME, DEFAULT_BROKER_COUNT, service_name = SERVICE_NAME)
def teardown_module(module):
install.uninstall(SERVICE_NAME, PACKAGE_NAME)
# --------- Endpoints -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_endpoints_address():
def fun():
ret = service_cli('endpoints {}'.format(DEFAULT_TASK_NAME))
if len(ret['native']) == DEFAULT_BROKER_COUNT:
return ret
return False
address = spin.time_wait_return(fun)
assert len(address) == 3
assert len(address['direct']) == DEFAULT_BROKER_COUNT
@pytest.mark.smoke
@pytest.mark.sanity
def test_endpoints_zookeeper():
zookeeper = command.run_cli('{} endpoints zookeeper'.format(PACKAGE_NAME))
assert zookeeper.rstrip() == (
'master.mesos:2181/dcos-service-{}'.format(PACKAGE_NAME)
)
# --------- Broker -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_broker_list():
brokers = service_cli('broker list')
assert set(brokers) == set([str(i) for i in range(DEFAULT_BROKER_COUNT)])
@pytest.mark.smoke
@pytest.mark.sanity
def test_broker_invalid():
try:
command.run_cli('{} broker get {}'.format(PACKAGE_NAME, DEFAULT_BROKER_COUNT + 1))
assert False, "Should have failed"
except AssertionError as arg:
raise arg
except:
pass # expected to fail
# --------- Pods -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_pods_restart():
for i in range(DEFAULT_BROKER_COUNT):
broker_id = tasks.get_task_ids(SERVICE_NAME,'{}-{}-{}'.format(DEFAULT_POD_TYPE, i, DEFAULT_TASK_NAME))
restart_info = service_cli('pods restart {}-{}'.format(DEFAULT_POD_TYPE, i))
tasks.check_tasks_updated(SERVICE_NAME, '{}-{}-{}'.format(DEFAULT_POD_TYPE, i, DEFAULT_TASK_NAME), broker_id)
assert len(restart_info) == 2
assert restart_info['tasks'][0] == '{}-{}-{}'.format(DEFAULT_POD_TYPE, i, DEFAULT_TASK_NAME)
@pytest.mark.smoke
@pytest.mark.sanity
def test_pods_replace():
broker_0_id = tasks.get_task_ids(SERVICE_NAME, '{}-0-{}'.format(DEFAULT_POD_TYPE, DEFAULT_TASK_NAME))
service_cli('pods replace {}-0'.format(DEFAULT_POD_TYPE))
tasks.check_tasks_updated(SERVICE_NAME, '{}-0-{}'.format(DEFAULT_POD_TYPE, DEFAULT_TASK_NAME), broker_0_id)
tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
# --------- Topics -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_topic_create():
create_info = service_cli(
'topic create {}'.format(EPHEMERAL_TOPIC_NAME)
)
print(create_info)
assert ('Created topic "%s".\n' % EPHEMERAL_TOPIC_NAME in create_info['message'])
assert ("topics with a period ('.') or underscore ('_') could collide." in create_info['message'])
topic_list_info = service_cli('topic list')
assert topic_list_info == [EPHEMERAL_TOPIC_NAME]
topic_info = service_cli('topic describe {}'.format(EPHEMERAL_TOPIC_NAME))
assert len(topic_info) == 1
assert len(topic_info['partitions']) == DEFAULT_PARTITION_COUNT
@pytest.mark.smoke
@pytest.mark.sanity
def test_topic_delete():
delete_info = service_cli('topic delete {}'.format(EPHEMERAL_TOPIC_NAME))
assert len(delete_info) == 1
assert delete_info['message'].startswith('Output: Topic {} is marked for deletion'.format(EPHEMERAL_TOPIC_NAME))
topic_info = service_cli('topic describe {}'.format(EPHEMERAL_TOPIC_NAME))
assert len(topic_info) == 1
assert len(topic_info['partitions']) == DEFAULT_PARTITION_COUNT
@pytest.fixture
def default_topic():
service_cli('topic create {}'.format(DEFAULT_TOPIC_NAME))
@pytest.mark.sanity
def test_topic_partition_count(default_topic):
topic_info = service_cli('topic describe {}'.format(DEFAULT_TOPIC_NAME))
assert len(topic_info['partitions']) == DEFAULT_PARTITION_COUNT
@pytest.mark.sanity
def test_topic_offsets_increase_with_writes():
offset_info = service_cli('topic offsets --time="-1" {}'.format(DEFAULT_TOPIC_NAME))
assert len(offset_info) == DEFAULT_PARTITION_COUNT
offsets = {}
for o in offset_info:
assert len(o) == DEFAULT_REPLICATION_FACTOR
offsets.update(o)
assert len(offsets) == DEFAULT_PARTITION_COUNT
num_messages = 10
write_info = service_cli('topic producer_test {} {}'.format(DEFAULT_TOPIC_NAME, num_messages))
assert len(write_info) == 1
assert write_info['message'].startswith('Output: {} records sent'.format(num_messages))
offset_info = service_cli('topic offsets --time="-1" {}'.format(DEFAULT_TOPIC_NAME))
assert len(offset_info) == DEFAULT_PARTITION_COUNT
post_write_offsets = {}
for o in offset_info:
assert len(o) == DEFAULT_REPLICATION_FACTOR
post_write_offsets.update(o)
assert offsets != post_write_offsets
@pytest.mark.sanity
def test_decreasing_topic_partitions_fails():
partition_info = service_cli('topic partitions {} {}'.format(DEFAULT_TOPIC_NAME, DEFAULT_PARTITION_COUNT - 1))
assert len(partition_info) == 1
assert partition_info['message'].startswith('Output: WARNING: If partitions are increased')
assert ('The number of partitions for a topic can only be increased' in partition_info['message'])
@pytest.mark.sanity
def test_setting_topic_partitions_to_same_value_fails():
partition_info = service_cli('topic partitions {} {}'.format(DEFAULT_TOPIC_NAME, DEFAULT_PARTITION_COUNT))
assert len(partition_info) == 1
assert partition_info['message'].startswith('Output: WARNING: If partitions are increased')
assert ('The number of partitions for a topic can only be increased' in partition_info['message'])
@pytest.mark.sanity
def test_increasing_topic_partitions_succeeds():
partition_info = service_cli('topic partitions {} {}'.format(DEFAULT_TOPIC_NAME, DEFAULT_PARTITION_COUNT + 1))
assert len(partition_info) == 1
assert partition_info['message'].startswith('Output: WARNING: If partitions are increased')
assert ('The number of partitions for a topic can only be increased' not in partition_info['message'])
@pytest.mark.sanity
def test_no_under_replicated_topics_exist():
partition_info = service_cli('topic under_replicated_partitions')
assert len(partition_info) == 1
assert partition_info['message'] == ''
@pytest.mark.sanity
def test_no_unavailable_partitions_exist():
partition_info = service_cli('topic unavailable_partitions')
assert len(partition_info) == 1
assert partition_info['message'] == ''
# --------- Cli -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_help_cli():
command.run_cli('help')
@pytest.mark.smoke
@pytest.mark.sanity
def test_config_cli():
configs = service_cli('config list')
assert len(configs) == 1
assert service_cli('config show {}'.format(configs[0]))
assert service_cli('config target')
assert service_cli('config target_id')
@pytest.mark.smoke
@pytest.mark.sanity
def test_plan_cli():
assert service_cli('plan list')
assert service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
assert service_cli('plan interrupt {} {}'.format(DEFAULT_PLAN_NAME, DEFAULT_PHASE_NAME))
assert service_cli('plan continue {} {}'.format(DEFAULT_PLAN_NAME, DEFAULT_PHASE_NAME))
@pytest.mark.smoke1
@pytest.mark.sanity1
# state gives error, not sure why; disabling for the moment
def test_state_cli():
assert service_cli('state framework_id')
assert service_cli('state properties')
@pytest.mark.smoke
@pytest.mark.sanity
def test_pods_cli():
assert service_cli('pods list')
assert service_cli('pods status {}-0'.format(DEFAULT_POD_TYPE))
assert service_cli('pods info {}-0'.format(DEFAULT_POD_TYPE))
# --------- Suppressed -------------
@pytest.mark.smoke
@pytest.mark.sanity
def test_suppress():
dcos_url = dcos.config.get_config_val('core.dcos_url')
suppressed_url = urllib.parse.urljoin(dcos_url,
'service/{}/v1/state/properties/suppressed'.format(PACKAGE_NAME))
def fun():
response = dcos.http.get(suppressed_url)
response.raise_for_status()
return response.text == "true"
spin.time_wait_noisy(fun)
|
nelmiux/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/email/__init__.py
|
262
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""A package for parsing, handling, and generating email messages."""
__version__ = '4.0.3'
__all__ = [
# Old names
'base64MIME',
'Charset',
'Encoders',
'Errors',
'Generator',
'Header',
'Iterators',
'Message',
'MIMEAudio',
'MIMEBase',
'MIMEImage',
'MIMEMessage',
'MIMEMultipart',
'MIMENonMultipart',
'MIMEText',
'Parser',
'quopriMIME',
'Utils',
'message_from_string',
'message_from_file',
# new names
'base64mime',
'charset',
'encoders',
'errors',
'generator',
'header',
'iterators',
'message',
'mime',
'parser',
'quoprimime',
'utils',
]
# Some convenience routines. Don't import Parser and Message as side-effects
# of importing email since those cascadingly import most of the rest of the
# email package.
def message_from_string(s, *args, **kws):
"""Parse a string into a Message object model.
Optional _class and strict are passed to the Parser constructor.
"""
from email.parser import Parser
return Parser(*args, **kws).parsestr(s)
def message_from_file(fp, *args, **kws):
"""Read a file and parse its contents into a Message object model.
Optional _class and strict are passed to the Parser constructor.
"""
from email.parser import Parser
return Parser(*args, **kws).parse(fp)
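# Illustrative sketch (not part of the original module): both helpers defer to
# email.parser.Parser, so headers and payload round-trip as expected.
def _parse_demo():
    msg = message_from_string('Subject: hello\n\nworld\n')
    assert msg['Subject'] == 'hello'
    assert msg.get_payload() == 'world\n'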
# Lazy loading to provide name mapping from new-style names (PEP 8 compatible
# email 4.0 module names), to old-style names (email 3.0 module names).
import sys
class LazyImporter(object):
def __init__(self, module_name):
self.__name__ = 'email.' + module_name
def __getattr__(self, name):
__import__(self.__name__)
mod = sys.modules[self.__name__]
self.__dict__.update(mod.__dict__)
return getattr(mod, name)
_LOWERNAMES = [
# email.<old name> -> email.<new name is lowercased old name>
'Charset',
'Encoders',
'Errors',
'FeedParser',
'Generator',
'Header',
'Iterators',
'Message',
'Parser',
'Utils',
'base64MIME',
'quopriMIME',
]
_MIMENAMES = [
# email.MIME<old name> -> email.mime.<new name is lowercased old name>
'Audio',
'Base',
'Image',
'Message',
'Multipart',
'NonMultipart',
'Text',
]
for _name in _LOWERNAMES:
importer = LazyImporter(_name.lower())
sys.modules['email.' + _name] = importer
setattr(sys.modules['email'], _name, importer)
import email.mime
for _name in _MIMENAMES:
importer = LazyImporter('mime.' + _name.lower())
sys.modules['email.MIME' + _name] = importer
setattr(sys.modules['email'], 'MIME' + _name, importer)
setattr(sys.modules['email.mime'], _name, importer)
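# Illustrative sketch: after the aliasing above, legacy email 3.0 names keep
# working; the first attribute access triggers LazyImporter.__getattr__.
def _legacy_name_demo():
    import email.Utils  # alias registered in sys.modules
    return email.Utils.formatdate()  # transparently loads email.utils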
|
hoosteeno/bedrock
|
refs/heads/master
|
bedrock/base/waffle_config.py
|
6
|
from time import time
from everett.manager import (
ConfigDictEnv,
ConfigEnvFileEnv,
ConfigManager,
ConfigOSEnv,
)
from bedrock.base.models import get_config_dict
class ConfigDBEnv(ConfigDictEnv):
def __init__(self):
# have to use this directly since settings aren't yet setup
# when we use this in the settings file
self._data = None
self.timeout = 300
self.last_update = 0
def get_cache(self):
if time() > self.last_update + self.timeout:
return None
return self._data
def set_cache(self, data):
self._data = data
self.last_update = time()
@property
def cfg(self):
# this is the method called by the get method
# of the superclass
configs = self.get_cache()
if not configs:
configs = get_config_dict()
self.set_cache(configs)
return configs
config = ConfigManager([
ConfigOSEnv(),
ConfigEnvFileEnv('.env'),
ConfigDBEnv(),
])
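# Hedged usage sketch (not part of bedrock): everett ConfigManager objects are
# callable, and lookups fall through the OS environment, then the .env file,
# then the database env above, in that order. The key name is hypothetical.
def _read_switch():
    return config('SWITCH_EXAMPLE', default='off')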
|
Datera/cinder
|
refs/heads/datera_queens_backport
|
cinder/volume/drivers/netapp/dataontap/nfs_base.py
|
1
|
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import copy
import math
import os
import re
import threading
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from oslo_utils import units
import six
from six.moves import urllib
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
HOUSEKEEPING_INTERVAL_SECONDS = 600 # ten minutes
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class NetAppNfsDriver(driver.ManageableVD,
driver.CloneableImageVD,
nfs.NfsDriver):
"""Base class for NetApp NFS driver for Data ONTAP."""
# do not increment this as it may be used in volume type definitions
VERSION = "1.0.0"
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
'netapp_server_hostname']
DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
def __init__(self, *args, **kwargs):
na_utils.validate_instantiation(**kwargs)
self._execute = None
self._context = None
self.app_version = kwargs.pop("app_version", "unknown")
kwargs['supports_auto_mosr'] = True
super(NetAppNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_img_cache_opts)
self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts)
self.backend_name = self.host.split('@')[1]
self.loopingcalls = loopingcalls.LoopingCalls()
def do_setup(self, context):
super(NetAppNfsDriver, self).do_setup(context)
self._context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self.zapi_client = None
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
super(NetAppNfsDriver, self).check_for_setup_error()
self.loopingcalls.start_tasks()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval.
Inheriting class overrides and then explicitly calls this method.
"""
# Add the task that deletes snapshots marked for deletion.
self.loopingcalls.add_task(
self._delete_snapshots_marked_for_deletion,
loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE)
# Add the task that logs EMS messages
self.loopingcalls.add_task(
self._handle_ems_logging,
loopingcalls.ONE_HOUR)
def _delete_snapshots_marked_for_deletion(self):
snapshots = self.zapi_client.get_snapshots_marked_for_deletion()
for snapshot in snapshots:
self.zapi_client.delete_snapshot(
snapshot['volume_name'], snapshot['name'])
def _handle_ems_logging(self):
"""Log autosupport messages."""
raise NotImplementedError()
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
return volume['provider_location']
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
"""
LOG.debug('create_volume on %s', volume['host'])
self._ensure_shares_mounted()
# get share as pool name
pool_name = volume_utils.extract_host(volume['host'], level='pool')
if pool_name is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
extra_specs = na_utils.get_volume_extra_specs(volume)
try:
volume['provider_location'] = pool_name
LOG.debug('Using pool %s.', pool_name)
self._do_create_volume(volume)
self._do_qos_for_volume(volume, extra_specs)
model_update = self._get_volume_model_update(volume) or {}
model_update['provider_location'] = volume['provider_location']
return model_update
except Exception:
LOG.exception("Exception creating vol %(name)s on "
"pool %(pool)s.",
{'name': volume['name'],
'pool': volume['provider_location']})
# We need to set this for the model update in order for the
# manager to behave correctly.
volume['provider_location'] = None
msg = _("Volume %(vol)s could not be created in pool %(pool)s.")
raise exception.VolumeBackendAPIException(data=msg % {
'vol': volume['name'], 'pool': pool_name})
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
source = {
'name': snapshot['name'],
'size': snapshot['volume_size'],
'id': snapshot['volume_id'],
}
return self._clone_source_to_destination_volume(source, volume)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
source = {'name': src_vref['name'],
'size': src_vref['size'],
'id': src_vref['id']}
return self._clone_source_to_destination_volume(source, volume)
def _clone_source_to_destination_volume(self, source, destination_volume):
share = self._get_volume_location(source['id'])
extra_specs = na_utils.get_volume_extra_specs(destination_volume)
try:
destination_volume['provider_location'] = share
self._clone_with_extension_check(
source, destination_volume)
self._do_qos_for_volume(destination_volume, extra_specs)
model_update = (
self._get_volume_model_update(destination_volume) or {})
model_update['provider_location'] = destination_volume[
'provider_location']
return model_update
except Exception:
LOG.exception("Exception creating volume %(name)s from source "
"%(source)s on share %(share)s.",
{'name': destination_volume['id'],
'source': source['name'],
'share': destination_volume['provider_location']})
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(data=msg % (
destination_volume['id']))
def _clone_with_extension_check(self, source, destination_volume):
source_size = source['size']
source_id = source['id']
source_name = source['name']
destination_volume_size = destination_volume['size']
self._clone_backing_file_for_volume(source_name,
destination_volume['name'],
source_id)
path = self.local_path(destination_volume)
if self._discover_file_till_timeout(path):
self._set_rw_permissions(path)
if destination_volume_size != source_size:
try:
self.extend_volume(destination_volume,
destination_volume_size)
except Exception:
LOG.error("Resizing %s failed. Cleaning "
"volume.", destination_volume['name'])
self._cleanup_volume_on_failure(destination_volume)
raise exception.CinderException(
_("Resizing clone %s failed.")
% destination_volume['name'])
else:
raise exception.CinderException(_("NFS file %s not discovered.")
% destination_volume['name'])
def _cleanup_volume_on_failure(self, volume):
LOG.debug('Cleaning up, failed operation on %s', volume['name'])
vol_path = self.local_path(volume)
if os.path.exists(vol_path):
LOG.debug('Found %s, deleting ...', vol_path)
self._delete_file_at_path(vol_path)
else:
LOG.debug('Could not find %s, continuing ...', vol_path)
def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
"""Set QoS policy on backend from volume type information."""
raise NotImplementedError()
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
raise NotImplementedError()
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self._clone_backing_file_for_volume(snapshot['volume_name'],
snapshot['name'],
snapshot['volume_id'],
is_snapshot=True)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self._delete_file(snapshot.volume_id, snapshot.name)
def _delete_file(self, file_id, file_name):
nfs_share = self._get_provider_location(file_id)
if self._volume_not_present(nfs_share, file_name):
LOG.debug('File %(file_name)s not found when attempting to delete '
'from share %(share)s',
{'file_name': file_name, 'share': nfs_share})
return
path = self._get_volume_path(nfs_share, file_name)
self._delete(path)
def _get_volume_location(self, volume_id):
"""Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>."""
provider_location = self._get_provider_location(volume_id)
nfs_server_ip, export_path = na_utils.get_export_host_junction_path(
provider_location)
if netutils.is_valid_ipv6(nfs_server_ip):
nfs_server_ip = netutils.escape_ipv6(nfs_server_ip)
return nfs_server_ip + ':' + export_path
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None,
is_snapshot=False,
source_snapshot=None):
"""Clone backing file for Cinder volume."""
raise NotImplementedError()
def _get_backing_flexvol_names(self):
"""Returns backing flexvol names."""
raise NotImplementedError()
def _get_flexvol_names_from_hosts(self, hosts):
"""Returns a set of flexvol names."""
raise NotImplementedError()
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume."""
volume = self.db.volume_get(self._context, volume_id)
return volume.provider_location
def _volume_not_present(self, nfs_mount, volume_name):
"""Check if volume exists."""
try:
self._try_execute('ls', self._get_volume_path(nfs_mount,
volume_name))
except processutils.ProcessExecutionError:
# If the volume isn't present
return True
return False
def _try_execute(self, *command, **kwargs):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
self._execute(*command, **kwargs)
return True
except processutils.ProcessExecutionError:
tries += 1
if tries >= self.configuration.num_shell_tries:
raise
LOG.exception("Recovering from a failed execute. "
"Try number %s", tries)
time.sleep(tries ** 2)
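# Hedged standalone sketch (not part of the driver): the quadratic backoff
# used by _try_execute above; with num_shell_tries = 3 it sleeps 1s, then 4s,
# before the third failure raises.
def _backoff_schedule(num_shell_tries):
    return [tries ** 2 for tries in range(1, num_shell_tries)]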
def _get_volume_path(self, nfs_share, volume_name):
"""Get volume path.
Get volume path (local fs path) for given volume name on given nfs
share.
:param nfs_share: string, example 172.18.194.100:/var/nfs
:param volume_name: string,
example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
"""
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
raise NotImplementedError()
def get_default_filter_function(self):
"""Get the default filter_function string."""
return self.DEFAULT_FILTER_FUNCTION
def get_default_goodness_function(self):
"""Get the default goodness_function string."""
return self.DEFAULT_GOODNESS_FUNCTION
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
super(NetAppNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
LOG.info('Copied image to volume %s using regular download.',
volume['id'])
self._register_image_in_cache(volume, image_id)
def _register_image_in_cache(self, volume, image_id):
"""Stores image in the cache."""
file_name = 'img-cache-%s' % image_id
LOG.info("Registering image in cache %s", file_name)
try:
self._do_clone_rel_img_cache(
volume['name'], file_name,
volume['provider_location'], file_name)
except Exception as e:
LOG.warning('Exception while registering image %(image_id)s'
' in cache. Exception: %(exc)s',
{'image_id': image_id, 'exc': e})
def _find_image_in_cache(self, image_id):
"""Finds image in cache and returns list of shares with file name."""
result = []
if getattr(self, '_mounted_shares', None):
for share in self._mounted_shares:
dir = self._get_mount_point_for_share(share)
file_name = 'img-cache-%s' % image_id
file_path = '%s/%s' % (dir, file_name)
if os.path.isfile(file_path):
LOG.debug('Found cache file for image %(image_id)s'
' on share %(share)s',
{'image_id': image_id, 'share': share})
result.append((share, file_name))
return result
def _do_clone_rel_img_cache(self, src, dst, share, cache_file):
"""Do clone operation w.r.t image cache file."""
@utils.synchronized(cache_file, external=True)
def _do_clone():
dir = self._get_mount_point_for_share(share)
file_path = '%s/%s' % (dir, dst)
if not os.path.exists(file_path):
LOG.info('Cloning from cache to destination %s', dst)
self._clone_backing_file_for_volume(src, dst, volume_id=None,
share=share)
src_path = '%s/%s' % (dir, src)
os.utime(src_path, None)
_do_clone()
@utils.synchronized('clean_cache')
def _spawn_clean_cache_job(self):
"""Spawns a clean task if not running."""
if getattr(self, 'cleaning', None):
LOG.debug('Image cache cleaning in progress. Returning... ')
return
else:
# Set cleaning to True
self.cleaning = True
t = threading.Timer(0, self._clean_image_cache)
t.start()
def _clean_image_cache(self):
"""Clean the image cache files in cache of space crunch."""
try:
LOG.debug('Image cache cleaning in progress.')
thres_size_perc_start =\
self.configuration.thres_avl_size_perc_start
thres_size_perc_stop = \
self.configuration.thres_avl_size_perc_stop
for share in getattr(self, '_mounted_shares', []):
try:
total_size, total_avl = \
self._get_capacity_info(share)
avl_percent = int((float(total_avl) / total_size) * 100)
if avl_percent <= thres_size_perc_start:
LOG.info('Cleaning cache for share %s.', share)
eligible_files = self._find_old_cache_files(share)
threshold_size = int(
(thres_size_perc_stop * total_size) / 100)
bytes_to_free = int(threshold_size - total_avl)
LOG.debug('Files to be queued for deletion %s',
eligible_files)
self._delete_files_till_bytes_free(
eligible_files, share, bytes_to_free)
else:
continue
except Exception as e:
LOG.warning('Exception during cache cleaning'
' %(share)s. Message - %(ex)s',
{'share': share, 'ex': e})
continue
finally:
LOG.debug('Image cache cleaning done.')
self.cleaning = False
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
raise NotImplementedError()
def _find_old_cache_files(self, share):
"""Finds the old files in cache."""
mount_fs = self._get_mount_point_for_share(share)
threshold_minutes = self.configuration.expiry_thres_minutes
cmd = ['find', mount_fs, '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+%s' % threshold_minutes]
res, _err = self._execute(*cmd, run_as_root=self._execute_as_root)
if res:
old_file_paths = res.strip('\n').split('\n')
mount_fs_len = len(mount_fs)
old_files = [x[mount_fs_len + 1:] for x in old_file_paths]
eligible_files = self._shortlist_del_eligible_files(
share, old_files)
return eligible_files
return []
def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0):
"""Delete files from disk till bytes are freed or list exhausted."""
LOG.debug('Bytes to free %s', bytes_to_free)
if file_list and bytes_to_free > 0:
sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True)
mount_fs = self._get_mount_point_for_share(share)
for f in sorted_files:
if f:
file_path = '%s/%s' % (mount_fs, f[0])
LOG.debug('Delete file path %s', file_path)
@utils.synchronized(f[0], external=True)
def _do_delete():
if self._delete_file_at_path(file_path):
return True
return False
if _do_delete():
bytes_to_free -= int(f[1])
if bytes_to_free <= 0:
return
def _delete_file_at_path(self, path):
"""Delete file from disk and return result as boolean."""
try:
LOG.debug('Deleting file at path %s', path)
cmd = ['rm', '-f', path]
self._execute(*cmd, run_as_root=self._execute_as_root)
return True
except Exception as ex:
LOG.warning('Exception during deleting %s', ex)
return False
def _copy_from_cache(self, volume, image_id, cache_result):
"""Try copying image file_name from cached file"""
raise NotImplementedError()
def _copy_from_img_service(self, context, volume, image_service,
image_id):
raise NotImplementedError()
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Create a volume efficiently from an existing image.
image_location is a string whose format depends on the
image service backend in use. The driver should use it
to determine whether cloning is possible.
Returns a dict of volume properties eg. provider_location,
boolean indicating whether cloning occurred.
"""
image_id = image_meta['id']
cloned = False
post_clone = False
extra_specs = na_utils.get_volume_extra_specs(volume)
major, minor = self.zapi_client.get_ontapi_version()
col_path = self.configuration.netapp_copyoffload_tool_path
try:
cache_result = self._find_image_in_cache(image_id)
if cache_result:
cloned = self._copy_from_cache(volume, image_id, cache_result)
else:
cloned = self._direct_nfs_clone(volume, image_location,
image_id)
# Try to use the copy offload tool
if not cloned and col_path and major == 1 and minor >= 20:
cloned = self._copy_from_img_service(context, volume,
image_service, image_id)
if cloned:
self._do_qos_for_volume(volume, extra_specs)
post_clone = self._post_clone_image(volume)
except Exception as e:
msg = e.msg if getattr(e, 'msg', None) else e
LOG.info('Image cloning unsuccessful for image'
' %(image_id)s. Message: %(msg)s',
{'image_id': image_id, 'msg': msg})
finally:
cloned = cloned and post_clone
share = (volume_utils.extract_host(volume['host'], level='pool')
if cloned else None)
bootable = True if cloned else False
return {'provider_location': share, 'bootable': bootable}, cloned
def _clone_from_cache(self, volume, image_id, cache_result):
"""Clones a copy from image cache."""
cloned = False
LOG.info('Cloning image %s from cache', image_id)
path = volume.host.split('#')[1]
for res in cache_result:
# Repeat tries in other shares if failed in some
(share, file_name) = res
if path == share:
LOG.debug('Cache share: %s', share)
if (share and
self._is_share_clone_compatible(volume, share)):
try:
self._do_clone_rel_img_cache(
file_name, volume['name'], share, file_name)
cloned = True
volume['provider_location'] = share
break
except Exception:
LOG.warning('Unexpected exception during'
' image cloning in share %s', share)
return cloned
def _direct_nfs_clone(self, volume, image_location, image_id):
"""Clone directly in nfs share."""
LOG.info('Checking image clone %s from glance share.', image_id)
cloned = False
image_locations = self._construct_image_nfs_url(image_location)
run_as_root = self._execute_as_root
for loc in image_locations:
share = self._is_cloneable_share(loc)
if share and self._is_share_clone_compatible(volume, share):
LOG.debug('Share is cloneable %s', share)
(__, ___, img_file) = loc.rpartition('/')
dir_path = self._get_mount_point_for_share(share)
img_path = '%s/%s' % (dir_path, img_file)
img_info = image_utils.qemu_img_info(img_path,
run_as_root=run_as_root)
if img_info.file_format == 'raw':
LOG.debug('Image is raw %s', image_id)
self._clone_backing_file_for_volume(
img_file, volume['name'],
volume_id=None, share=share)
cloned = True
break
else:
LOG.info(
'Image will locally be converted to raw %s',
image_id)
dst = '%s/%s' % (dir_path, volume['name'])
image_utils.convert_image(img_path, dst, 'raw',
run_as_root=run_as_root)
data = image_utils.qemu_img_info(dst,
run_as_root=run_as_root)
if data.file_format != "raw":
raise exception.InvalidResults(
_("Converted to raw, but"
" format is now %s") % data.file_format)
else:
cloned = True
self._register_image_in_cache(
volume, image_id)
break
return cloned
def _post_clone_image(self, volume):
"""Do operations post image cloning."""
LOG.info('Performing post clone for %s', volume['name'])
share = volume_utils.extract_host(volume['host'], level='pool')
vol_path = self._get_volume_path(share, volume['name'])
if self._discover_file_till_timeout(vol_path):
self._set_rw_permissions(vol_path)
self._resize_image_file(vol_path, volume['size'])
return True
raise exception.InvalidResults(
_("NFS file could not be discovered."))
def _resize_image_file(self, path, new_size):
"""Resize the image file on share to new size."""
LOG.debug('Checking file for resize')
if self._is_file_size_equal(path, new_size):
return
else:
LOG.info('Resizing file to %sG', new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if self._is_file_size_equal(path, new_size):
return
else:
raise exception.InvalidResults(
_('Resizing image file failed.'))
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path,
run_as_root=self._execute_as_root)
virt_size = data.virtual_size / units.Gi
        return virt_size == size
def _touch_path_to_refresh(self, path):
try:
# Touching parent directory forces NFS client to flush its cache.
self._execute('touch', path, run_as_root=self._execute_as_root)
except processutils.ProcessExecutionError:
LOG.exception("Failed to touch path %s.", path)
def _discover_file_till_timeout(self, path, timeout=75):
"""Checks if file size at path is equal to size."""
# Sometimes nfs takes time to discover file
# Retrying in case any unexpected situation occurs
#
# The NFS client by default has a 60 second maximum
# cache time before it refreshes directory information.
# (See man 5 nfs acdirmax.) Allow 75 seconds for
# retries to ensure that this cache has refreshed.
retry_seconds = timeout
sleep_interval = 2
base_path = os.path.dirname(path)
self._touch_path_to_refresh(base_path)
while True:
if os.path.exists(path):
return True
else:
if retry_seconds <= 0:
LOG.warning('Discover file retries exhausted.')
return False
else:
time.sleep(sleep_interval)
retry_seconds -= sleep_interval
self._touch_path_to_refresh(base_path)
def _is_cloneable_share(self, image_location):
"""Finds if the image at location is cloneable."""
conn, dr = self._check_get_nfs_path_segs(image_location)
return self._check_share_in_use(conn, dr)
def _check_get_nfs_path_segs(self, image_location):
"""Checks if the nfs path format is matched.
        The WebNFS URL format with a relative path is supported.
        All characters are accepted in path names; candidates are
        checked against the mounted shares, which contain only
        allowed path segments. Returns connection and dir details.
"""
conn, dr = None, None
if image_location:
nfs_loc_pattern = \
(r'^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)'
r'*(/[^\/\\\\]+)$)')
matched = re.match(nfs_loc_pattern, image_location, flags=0)
if not matched:
LOG.debug('Image location not in the'
' expected format %s', image_location)
else:
conn = matched.group(2)
dr = matched.group(3) or '/'
return conn, dr
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
        Multiple shares can have the same directory path but
        be served from different IPs. This finds the share
        that is served by the given ip on the same NFS server.
"""
raise NotImplementedError()
def _check_share_in_use(self, conn, dir):
"""Checks if share is cinder mounted and returns it."""
try:
if conn:
host = conn.split(':')[0]
ip = na_utils.resolve_hostname(host)
share_candidates = []
for sh in self._mounted_shares:
sh_exp = sh.split(':')[-1]
if sh_exp == dir:
share_candidates.append(sh)
if share_candidates:
LOG.debug('Found possible share matches %s',
share_candidates)
return self._share_match_for_ip(ip, share_candidates)
except Exception:
LOG.warning("Unexpected exception while "
"short listing used share.")
return None
def _construct_image_nfs_url(self, image_location):
"""Construct direct url for nfs backend.
        It creates a direct URL from image_location, which is a
        tuple of (direct_url, locations). Returns a list of URLs
        with the nfs scheme for NFS stores; otherwise returns the
        direct URL. The result needs to be verified by the backend
        before use.
"""
direct_url, locations = image_location
if not direct_url and not locations:
raise exception.NotFound(_('Image location not present.'))
urls = []
if not locations:
urls.append(direct_url)
else:
for location in locations:
if not location['metadata']:
continue
location_type = location['metadata'].get('type')
if not location_type or location_type.lower() != "nfs":
continue
share_location = location['metadata'].get('share_location')
mountpoint = location['metadata'].get('mountpoint')
if not share_location or not mountpoint:
continue
url = location['url']
url_parse = urllib.parse.urlparse(url)
abs_path = os.path.join(url_parse.netloc, url_parse.path)
rel_path = os.path.relpath(abs_path, mountpoint)
direct_url = "%s/%s" % (share_location, rel_path)
urls.append(direct_url)
return urls
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
LOG.info('Extending volume %s.', volume['name'])
try:
path = self.local_path(volume)
self._resize_image_file(path, new_size)
except Exception as err:
exception_msg = (_("Failed to extend volume "
"%(name)s, Error msg: %(msg)s.") %
{'name': volume['name'],
'msg': six.text_type(err)})
raise exception.VolumeBackendAPIException(data=exception_msg)
try:
extra_specs = na_utils.get_volume_extra_specs(volume)
# Create volume copy with new size for size-dependent QOS specs
volume_copy = copy.copy(volume)
volume_copy['size'] = new_size
self._do_qos_for_volume(volume_copy, extra_specs, cleanup=False)
except Exception as err:
exception_msg = (_("Failed to set QoS for existing volume "
"%(name)s, Error msg: %(msg)s.") %
{'name': volume['name'],
'msg': six.text_type(err)})
raise exception.VolumeBackendAPIException(data=exception_msg)
def _is_share_clone_compatible(self, volume, share):
"""Checks if share is compatible with volume to host its clone."""
raise NotImplementedError()
def _check_share_can_hold_size(self, share, size):
"""Checks if volume can hold image with size."""
_tot_size, tot_available = self._get_capacity_info(
share)
if tot_available < size:
msg = _("Container size smaller than required file size.")
raise exception.VolumeDriverException(msg)
def _move_nfs_file(self, source_path, dest_path):
"""Moves source to destination."""
@utils.synchronized(dest_path, external=True)
def _move_file(src, dst):
if os.path.exists(dst):
LOG.warning("Destination %s already exists.", dst)
return False
self._execute('mv', src, dst, run_as_root=self._execute_as_root)
return True
try:
return _move_file(source_path, dest_path)
except Exception as e:
LOG.warning('Exception moving file %(src)s. Message - %(e)s',
{'src': source_path, 'e': e})
return False
def _get_export_ip_path(self, volume_id=None, share=None):
"""Returns export ip and path.
One of volume id or share is used to return the values.
"""
if volume_id:
provider_location = self._get_provider_location(volume_id)
host_ip, export_path = na_utils.get_export_host_junction_path(
provider_location)
elif share:
host_ip, export_path = na_utils.get_export_host_junction_path(
share)
else:
raise exception.InvalidInput(
'A volume ID or share was not specified.')
return host_ip, export_path
def _get_share_capacity_info(self, nfs_share):
"""Returns the share capacity metrics needed by the scheduler."""
capacity = dict()
capacity['reserved_percentage'] = self.reserved_percentage
capacity['max_over_subscription_ratio'] = (
self.max_over_subscription_ratio)
total_size, total_available = self._get_capacity_info(nfs_share)
capacity['total_capacity_gb'] = na_utils.round_down(
total_size / units.Gi)
capacity['free_capacity_gb'] = na_utils.round_down(
total_available / units.Gi)
return capacity
def _get_capacity_info(self, nfs_share):
"""Get total capacity and free capacity in bytes for an nfs share."""
export_path = nfs_share.rsplit(':', 1)[1]
capacity = self.zapi_client.get_flexvol_capacity(
flexvol_path=export_path)
return capacity['size-total'], capacity['size-available']
def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Match volume type for share file."""
def _convert_vol_ref_share_name_to_share_ip(self, vol_ref):
"""Converts the share point name to an IP address
The volume reference may have a DNS name portion in the share name.
Convert that to an IP address and then restore the entire path.
:param vol_ref: Driver-specific information used to identify a volume
:return: A volume reference where share is in IP format.
"""
# First strip out share and convert to IP format.
share_split = vol_ref.rsplit(':', 1)
vol_ref_share_ip = na_utils.resolve_hostname(share_split[0])
# Now place back into volume reference.
vol_ref_share = vol_ref_share_ip + ':' + share_split[1]
return vol_ref_share
def _get_share_mount_and_vol_from_vol_ref(self, vol_ref):
"""Get the NFS share, the NFS mount, and the volume from reference
Determine the NFS share point, the NFS mount point, and the volume
(with possible path) from the given volume reference. Raise exception
if unsuccessful.
:param vol_ref: Driver-specific information used to identify a volume
:return: NFS Share, NFS mount, volume path or raise error
"""
# Check that the reference is valid.
if 'source-name' not in vol_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=reason)
vol_ref_name = vol_ref['source-name']
self._ensure_shares_mounted()
# If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config
# file, but the admin tries to manage the file located at
# 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below
# when searching self._mounted_shares to see if we have an existing
# mount that would work to access the volume-to-be-managed (a string
# comparison is done instead of IP comparison).
vol_ref_share = self._convert_vol_ref_share_name_to_share_ip(
vol_ref_name)
for nfs_share in self._mounted_shares:
cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share)
(orig_share, work_share, file_path) = \
vol_ref_share.partition(cfg_share)
if work_share == cfg_share:
file_path = file_path[1:] # strip off leading path divider
LOG.debug("Found possible share %s; checking mount.",
work_share)
nfs_mount = self._get_mount_point_for_share(nfs_share)
vol_full_path = os.path.join(nfs_mount, file_path)
if os.path.isfile(vol_full_path):
LOG.debug("Found share %(share)s and vol %(path)s on "
"mount %(mnt)s",
{'share': nfs_share, 'path': file_path,
'mnt': nfs_mount})
return nfs_share, nfs_mount, file_path
else:
LOG.debug("vol_ref %(ref)s not on share %(share)s.",
{'ref': vol_ref_share, 'share': nfs_share})
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref,
reason=_('Volume not found on configured storage backend.'))
def manage_existing(self, volume, existing_vol_ref):
"""Manages an existing volume.
The specified Cinder volume is to be taken into Cinder management.
The driver will verify its existence and then rename it to the
new Cinder volume name. It is expected that the existing volume
reference is an NFS share point and some [/path]/volume;
e.g., 10.10.32.1:/openstack/vol_to_manage
or 10.10.32.1:/openstack/some_directory/vol_to_manage
:param volume: Cinder volume to manage
:param existing_vol_ref: Driver-specific information used to identify a
volume
"""
# Attempt to find NFS share, NFS mount, and volume path from vol_ref.
(nfs_share, nfs_mount, vol_path) = \
self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s",
{'vol': volume['id'],
'ref': existing_vol_ref['source-name']})
extra_specs = na_utils.get_volume_extra_specs(volume)
self._check_volume_type(volume, nfs_share, vol_path, extra_specs)
if vol_path == volume['name']:
LOG.debug("New Cinder volume %s name matches reference name: "
"no need to rename.", volume['name'])
else:
src_vol = os.path.join(nfs_mount, vol_path)
dst_vol = os.path.join(nfs_mount, volume['name'])
try:
self._execute("mv", src_vol, dst_vol,
run_as_root=self._execute_as_root,
check_exit_code=True)
LOG.debug("Setting newly managed Cinder volume name to %s",
volume['name'])
self._set_rw_permissions_for_all(dst_vol)
except processutils.ProcessExecutionError as err:
exception_msg = (_("Failed to manage existing volume %(name)s,"
" because rename operation failed:"
" Error msg: %(msg)s."),
{'name': existing_vol_ref['source-name'],
'msg': err})
raise exception.VolumeBackendAPIException(data=exception_msg)
try:
self._do_qos_for_volume(volume, extra_specs, cleanup=False)
except Exception as err:
exception_msg = (_("Failed to set QoS for existing volume "
"%(name)s, Error msg: %(msg)s.") %
{'name': existing_vol_ref['source-name'],
'msg': six.text_type(err)})
raise exception.VolumeBackendAPIException(data=exception_msg)
model_update = self._get_volume_model_update(volume) or {}
model_update['provider_location'] = nfs_share
return model_update
def manage_existing_get_size(self, volume, existing_vol_ref):
"""Returns the size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_vol_ref: Existing volume to take under management
"""
# Attempt to find NFS share, NFS mount, and volume path from vol_ref.
(nfs_share, nfs_mount, vol_path) = \
self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
try:
LOG.debug("Asked to get size of NFS vol_ref %s.",
existing_vol_ref['source-name'])
file_path = os.path.join(nfs_mount, vol_path)
file_size = float(utils.get_file_size(file_path)) / units.Gi
vol_size = int(math.ceil(file_size))
except (OSError, ValueError):
exception_message = (_("Failed to manage existing volume "
"%(name)s, because of error in getting "
"volume size."),
{'name': existing_vol_ref['source-name']})
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.",
{'ref': existing_vol_ref['source-name'], 'size': vol_size})
return vol_size
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. A log entry
will be made to notify the Admin that the volume is no longer being
managed.
:param volume: Cinder volume to unmanage
"""
vol_str = CONF.volume_name_template % volume['id']
vol_path = os.path.join(volume['provider_location'], vol_str)
LOG.info('Cinder NFS volume with current path "%(cr)s" is '
'no longer being managed.', {'cr': vol_path})
|
weiqiangdragonite/blog_tmp
|
refs/heads/master
|
python/flask/microblog/db_repository/versions/002_migration.py
|
2
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
user = Table('user', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('nickname', VARCHAR(length=64)),
Column('email', VARCHAR(length=120)),
Column('role', SMALLINT),
)
posts = Table('posts', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('body', String(length=140)),
Column('timestamp', DateTime),
Column('user_id', Integer),
)
users = Table('users', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('nickname', String(length=64)),
Column('email', String(length=120)),
Column('role', SmallInteger, default=ColumnDefault(0)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['user'].drop()
post_meta.tables['posts'].create()
post_meta.tables['users'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['user'].create()
post_meta.tables['posts'].drop()
post_meta.tables['users'].drop()
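# Usage sketch (assumes the sqlalchemy-migrate CLI; not part of the
# generated migration): these hooks are invoked by the ``migrate`` tool,
# e.g. ``migrate upgrade <database_url> <repository_path>``.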
|
zhangpf/vbox
|
refs/heads/master
|
src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/BuildVersion.py
|
11
|
## @file
#
# This file is for build version number auto generation
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
Build version information
'''
gBUILD_VERSION = "Build 2524"
|
DanieSchro3/Daniel
|
refs/heads/master
|
py/openage/convert/gamedata/sound.py
|
46
|
from .. import dataformat
from struct import Struct, unpack_from
from ..util import dbg, zstr
from .empiresdat import endianness
class SoundItem(dataformat.Exportable):
name_struct = "sound_item"
name_struct_file = "sound"
struct_description = "one possible file for a sound."
data_format = (
(dataformat.READ_EXPORT, "filename", "char[13]"),
(dataformat.READ_EXPORT, "resource_id", "int32_t"),
(dataformat.READ_EXPORT, "probablilty", "int16_t"),
(dataformat.READ_EXPORT, "civilisation", "int16_t"),
(dataformat.READ_UNKNOWN, None, "int16_t"),
)
def __init__(self):
super().__init__()
class Sound(dataformat.Exportable):
name_struct = "sound"
name_struct_file = "sound"
struct_description = "describes a sound, consisting of several sound items."
data_format = (
(dataformat.READ_EXPORT, "id", "int32_t"),
(dataformat.READ_EXPORT, "item_count", "uint16_t"),
(dataformat.READ_UNKNOWN, None, "int32_t"),
(dataformat.READ_EXPORT, "sound_items", dataformat.SubdataMember(
ref_type=SoundItem,
ref_to="id",
length="item_count",
)),
)
def __init__(self):
super().__init__()
|
google/prog-edu-assistant
|
refs/heads/main
|
python/utils.py
|
1
|
"""utility libraries for prog-edu-assistant."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from IPython.core import ultratb
# pylint: disable=too-few-public-methods
class TbFormatter:
'''TbFormatter formats tracebacks of exceptions using IPython formatter.'''
def __init__(self, tb_offset=0, color='Neutral'):
self.syntax_tb = ultratb.SyntaxTB(color_scheme=color)
self.interactive_tb = ultratb.AutoFormattedTB(mode='Context',
color_scheme=color,
tb_offset=tb_offset)
def format(self, etype, value, traceback):
'''Formats the output of sys.exc_info.'''
if etype == SyntaxError:
return self.syntax_tb.stb2text(
self.syntax_tb.structured_traceback(etype, value, []))
return self.interactive_tb.stb2text(
self.interactive_tb.structured_traceback(etype, value, traceback))
|
wdv4758h/ZipPy
|
refs/heads/master
|
edu.uci.python.benchmark/src/benchmarks/sympy/sympy/printing/python.py
|
118
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import keyword as kw
import sympy
from .repr import ReprPrinter
from .str import StrPrinter
# A list of classes that should be printed using StrPrinter
STRPRINT = ("Add", "Infinity", "Integer", "Mul", "NegativeInfinity",
"Pow", "Zero")
class PythonPrinter(ReprPrinter, StrPrinter):
"""A printer which converts an expression into its Python interpretation."""
def __init__(self, settings=None):
ReprPrinter.__init__(self)
StrPrinter.__init__(self, settings)
self.symbols = []
self.functions = []
# Create print methods for classes that should use StrPrinter instead
# of ReprPrinter.
for name in STRPRINT:
f_name = "_print_%s" % name
f = getattr(StrPrinter, f_name)
setattr(PythonPrinter, f_name, f)
def _print_Function(self, expr):
func = expr.func.__name__
        if not hasattr(sympy, func) and func not in self.functions:
self.functions.append(func)
return StrPrinter._print_Function(self, expr)
    # procedure (!) for defining symbols which have to be defined in print_python()
def _print_Symbol(self, expr):
symbol = self._str(expr)
if symbol not in self.symbols:
self.symbols.append(symbol)
return StrPrinter._print_Symbol(self, expr)
def _print_module(self, expr):
raise ValueError('Modules in the expression are unacceptable')
def python(expr, **settings):
"""Return Python interpretation of passed expression
(can be passed to the exec() function without any modifications)"""
printer = PythonPrinter(settings)
exprp = printer.doprint(expr)
result = ''
    # Emit assignments for the symbols and functions that were found
renamings = {}
for symbolname in printer.symbols:
newsymbolname = symbolname
# Escape symbol names that are reserved python keywords
if kw.iskeyword(newsymbolname):
while True:
newsymbolname += "_"
if (newsymbolname not in printer.symbols and
newsymbolname not in printer.functions):
renamings[sympy.Symbol(
symbolname)] = sympy.Symbol(newsymbolname)
break
result += newsymbolname + ' = Symbol(\'' + symbolname + '\')\n'
for functionname in printer.functions:
newfunctionname = functionname
# Escape function names that are reserved python keywords
if kw.iskeyword(newfunctionname):
while True:
newfunctionname += "_"
if (newfunctionname not in printer.symbols and
newfunctionname not in printer.functions):
renamings[sympy.Function(
functionname)] = sympy.Function(newfunctionname)
break
result += newfunctionname + ' = Function(\'' + functionname + '\')\n'
    if renamings:
exprp = expr.subs(renamings)
result += 'e = ' + printer._str(exprp)
return result
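# Example (illustrative):
#   >>> from sympy import Symbol
#   >>> print(python(Symbol('x') + 1))
#   x = Symbol('x')
#   e = x + 1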
def print_python(expr, **settings):
"""Print output of python() function"""
print(python(expr, **settings))
|
rice-solar-physics/ebtelPlusPlus
|
refs/heads/master
|
tests/conftest.py
|
1
|
import pytest
def pytest_addoption(parser):
parser.addoption(
"--ebtel_idl_path", action="store", default=None, help="Path to EBTEL IDL code"
)
@pytest.fixture
def ebtel_idl_path(request):
return request.config.getoption("--ebtel_idl_path")
|
karimbahgat/Pipy
|
refs/heads/master
|
pipy/pip/_vendor/requests/packages/chardet/euctwfreq.py
|
3132
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
supergentle/migueltutorial
|
refs/heads/master
|
flask/lib/python2.7/site-packages/itsdangerous.py
|
626
|
# -*- coding: utf-8 -*-
"""
itsdangerous
~~~~~~~~~~~~
A module that implements various functions to deal with untrusted
sources. Mainly useful for web applications.
:copyright: (c) 2014 by Armin Ronacher and the Django Software Foundation.
:license: BSD, see LICENSE for more details.
"""
import sys
import hmac
import zlib
import time
import base64
import hashlib
import operator
from datetime import datetime
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip
text_type = unicode
int_to_byte = chr
number_types = (int, long, float)
else:
from functools import reduce
izip = zip
text_type = str
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
number_types = (int, float)
try:
import simplejson as json
except ImportError:
import json
class _CompactJSON(object):
"""Wrapper around simplejson that strips whitespace.
"""
def loads(self, payload):
return json.loads(payload)
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':'))
compact_json = _CompactJSON()
# 2011/01/01 in UTC
EPOCH = 1293840000
def want_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type):
s = s.encode(encoding, errors)
return s
def is_text_serializer(serializer):
"""Checks wheather a serializer generates text or binary."""
return isinstance(serializer.dumps({}), text_type)
# Starting with 3.3 the standard library has a C implementation for
# constant-time string compares.
_builtin_constant_time_compare = getattr(hmac, 'compare_digest', None)
def constant_time_compare(val1, val2):
"""Returns True if the two strings are equal, False otherwise.
    The time taken is independent of the number of characters that match. Do
    not use this function for anything other than comparison with
    known-length targets.
    This should be implemented in C in order to get it completely right.
"""
if _builtin_constant_time_compare is not None:
return _builtin_constant_time_compare(val1, val2)
len_eq = len(val1) == len(val2)
if len_eq:
result = 0
left = val1
else:
result = 1
left = val2
for x, y in izip(bytearray(left), bytearray(val2)):
result |= x ^ y
return result == 0
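# Illustrative usage of constant_time_compare (not part of the original
# module; example byte strings are made up):
#
#     assert constant_time_compare(b'sig-abc', b'sig-abc')
#     assert not constant_time_compare(b'sig-abc', b'sig-xyz')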
class BadData(Exception):
"""Raised if bad data of any sort was encountered. This is the
base for all exceptions that itsdangerous is currently using.
.. versionadded:: 0.15
"""
message = None
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
def __str__(self):
return text_type(self.message)
if PY2:
__unicode__ = __str__
def __str__(self):
return self.__unicode__().encode('utf-8')
class BadPayload(BadData):
"""This error is raised in situations when payload is loaded without
checking the signature first and an exception happend as a result of
that. The original exception that caused that will be stored on the
exception as :attr:`original_error`.
This can also happen with a :class:`JSONWebSignatureSerializer` that
is subclassed and uses a different serializer for the payload than
the expected one.
.. versionadded:: 0.15
"""
def __init__(self, message, original_error=None):
BadData.__init__(self, message)
#: If available, the error that indicates why the payload
#: was not valid. This might be `None`.
self.original_error = original_error
class BadSignature(BadData):
"""This error is raised if a signature does not match. As of
itsdangerous 0.14 there are helpful attributes on the exception
    instances. You can also catch the base class :exc:`BadData`.
"""
def __init__(self, message, payload=None):
BadData.__init__(self, message)
#: The payload that failed the signature test. In some
#: situations you might still want to inspect this, even if
#: you know it was tampered with.
#:
#: .. versionadded:: 0.14
self.payload = payload
class BadTimeSignature(BadSignature):
"""Raised for time based signatures that fail. This is a subclass
of :class:`BadSignature` so you can catch those down as well.
"""
def __init__(self, message, payload=None, date_signed=None):
BadSignature.__init__(self, message, payload)
#: If the signature expired this exposes the date of when the
#: signature was created. This can be helpful in order to
        #: tell the user how long ago a link went stale.
#:
#: .. versionadded:: 0.14
self.date_signed = date_signed
class BadHeader(BadSignature):
"""Raised if a signed header is invalid in some form. This only
happens for serializers that have a header that goes with the
signature.
.. versionadded:: 0.24
"""
def __init__(self, message, payload=None, header=None,
original_error=None):
BadSignature.__init__(self, message, payload)
#: If the header is actually available but just malformed it
#: might be stored here.
self.header = header
#: If available, the error that indicates why the payload
#: was not valid. This might be `None`.
self.original_error = original_error
class SignatureExpired(BadTimeSignature):
"""Signature timestamp is older than required max_age. This is a
    subclass of :exc:`BadTimeSignature`, so you can use the base class for
    catching the error.
"""
def base64_encode(string):
"""base64 encodes a single bytestring (and is tolerant to getting
called with a unicode string).
The resulting bytestring is safe for putting into URLs.
"""
string = want_bytes(string)
return base64.urlsafe_b64encode(string).strip(b'=')
def base64_decode(string):
"""base64 decodes a single bytestring (and is tolerant to getting
called with a unicode string).
The result is also a bytestring.
"""
string = want_bytes(string, encoding='ascii', errors='ignore')
return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4))
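# Hypothetical round trip for the URL-safe base64 helpers above: the '='
# padding is stripped on encode and restored on decode.
#
#     token = base64_encode(b'value')        # -> b'dmFsdWU'
#     assert base64_decode(token) == b'value'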
def int_to_bytes(num):
assert num >= 0
rv = []
while num:
rv.append(int_to_byte(num & 0xff))
num >>= 8
return b''.join(reversed(rv))
def bytes_to_int(bytestr):
return reduce(lambda a, b: a << 8 | b, bytearray(bytestr), 0)
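# Sketch of the integer/byte helpers (assumed example values): int_to_bytes
# produces a big-endian byte string and bytes_to_int reverses it.
#
#     assert int_to_bytes(1024) == b'\x04\x00'
#     assert bytes_to_int(b'\x04\x00') == 1024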
class SigningAlgorithm(object):
"""Subclasses of `SigningAlgorithm` have to implement `get_signature` to
provide signature generation functionality.
"""
def get_signature(self, key, value):
"""Returns the signature for the given key and value"""
raise NotImplementedError()
def verify_signature(self, key, value, sig):
"""Verifies the given signature matches the expected signature"""
return constant_time_compare(sig, self.get_signature(key, value))
class NoneAlgorithm(SigningAlgorithm):
"""This class provides a algorithm that does not perform any signing and
returns an empty signature.
"""
def get_signature(self, key, value):
return b''
class HMACAlgorithm(SigningAlgorithm):
"""This class provides signature generation using HMACs."""
#: The digest method to use with the MAC algorithm. This defaults to sha1
#: but can be changed for any other function in the hashlib module.
default_digest_method = staticmethod(hashlib.sha1)
def __init__(self, digest_method=None):
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
def get_signature(self, key, value):
mac = hmac.new(key, msg=value, digestmod=self.digest_method)
return mac.digest()
class Signer(object):
"""This class can sign bytes and unsign it and validate the signature
provided.
Salt can be used to namespace the hash, so that a signed string is only
    valid for a given namespace. Leaving this at the default value, or
    re-using a salt across different parts of your application where the
    same signed value in one part could mean something different in another
    part, is a security risk.
See :ref:`the-salt` for an example of what the salt is doing and how you
can utilize it.
.. versionadded:: 0.14
`key_derivation` and `digest_method` were added as arguments to the
class constructor.
.. versionadded:: 0.18
`algorithm` was added as an argument to the class constructor.
"""
#: The digest method to use for the signer. This defaults to sha1 but can
#: be changed for any other function in the hashlib module.
#:
#: .. versionchanged:: 0.14
default_digest_method = staticmethod(hashlib.sha1)
#: Controls how the key is derived. The default is Django style
#: concatenation. Possible values are ``concat``, ``django-concat``
#: and ``hmac``. This is used for deriving a key from the secret key
#: with an added salt.
#:
#: .. versionadded:: 0.14
default_key_derivation = 'django-concat'
def __init__(self, secret_key, salt=None, sep='.', key_derivation=None,
digest_method=None, algorithm=None):
self.secret_key = want_bytes(secret_key)
self.sep = sep
self.salt = 'itsdangerous.Signer' if salt is None else salt
if key_derivation is None:
key_derivation = self.default_key_derivation
self.key_derivation = key_derivation
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
if algorithm is None:
algorithm = HMACAlgorithm(self.digest_method)
self.algorithm = algorithm
def derive_key(self):
"""This method is called to derive the key. If you're unhappy with
the default key derivation choices you can override them here.
Keep in mind that the key derivation in itsdangerous is not intended
to be used as a security method to make a complex key out of a short
password. Instead you should use large random secret keys.
"""
salt = want_bytes(self.salt)
if self.key_derivation == 'concat':
return self.digest_method(salt + self.secret_key).digest()
elif self.key_derivation == 'django-concat':
return self.digest_method(salt + b'signer' +
self.secret_key).digest()
elif self.key_derivation == 'hmac':
mac = hmac.new(self.secret_key, digestmod=self.digest_method)
mac.update(salt)
return mac.digest()
elif self.key_derivation == 'none':
return self.secret_key
else:
raise TypeError('Unknown key derivation method')
def get_signature(self, value):
"""Returns the signature for the given value"""
value = want_bytes(value)
key = self.derive_key()
sig = self.algorithm.get_signature(key, value)
return base64_encode(sig)
def sign(self, value):
"""Signs the given string."""
return value + want_bytes(self.sep) + self.get_signature(value)
def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
key = self.derive_key()
try:
sig = base64_decode(sig)
except Exception:
return False
return self.algorithm.verify_signature(key, value, sig)
def unsign(self, signed_value):
"""Unsigns the given string."""
signed_value = want_bytes(signed_value)
sep = want_bytes(self.sep)
if sep not in signed_value:
raise BadSignature('No %r found in value' % self.sep)
value, sig = signed_value.rsplit(sep, 1)
if self.verify_signature(value, sig):
return value
raise BadSignature('Signature %r does not match' % sig,
payload=value)
def validate(self, signed_value):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
self.unsign(signed_value)
return True
except BadSignature:
return False
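# Minimal usage sketch for Signer (illustrative only; the secret key below
# is a placeholder):
#
#     s = Signer('secret-key')
#     signed = s.sign(b'my string')      # b'my string.<base64 signature>'
#     assert s.unsign(signed) == b'my string'
#     assert s.validate(signed)          # tampered input would return False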
class TimestampSigner(Signer):
"""Works like the regular :class:`Signer` but also records the time
of the signing and can be used to expire signatures. The unsign
    method can raise a :exc:`SignatureExpired` exception if the unsigning
failed because the signature is expired. This exception is a subclass
of :exc:`BadSignature`.
"""
def get_timestamp(self):
"""Returns the current timestamp. This implementation returns the
seconds since 1/1/2011. The function must return an integer.
"""
return int(time.time() - EPOCH)
def timestamp_to_datetime(self, ts):
"""Used to convert the timestamp from `get_timestamp` into a
datetime object.
"""
return datetime.utcfromtimestamp(ts + EPOCH)
def sign(self, value):
"""Signs the given string and also attaches a time information."""
value = want_bytes(value)
timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
sep = want_bytes(self.sep)
value = value + sep + timestamp
return value + sep + self.get_signature(value)
def unsign(self, value, max_age=None, return_timestamp=False):
"""Works like the regular :meth:`~Signer.unsign` but can also
validate the time. See the base docstring of the class for
the general behavior. If `return_timestamp` is set to `True`
the timestamp of the signature will be returned as naive
:class:`datetime.datetime` object in UTC.
"""
try:
result = Signer.unsign(self, value)
sig_error = None
except BadSignature as e:
sig_error = e
result = e.payload or b''
sep = want_bytes(self.sep)
# If there is no timestamp in the result there is something
# seriously wrong. In case there was a signature error, we raise
# that one directly, otherwise we have a weird situation in which
        # we shouldn't have ended up unless someone used a time-based
        # serializer on non-timestamp data, so catch that.
        if sep not in result:
if sig_error:
raise sig_error
raise BadTimeSignature('timestamp missing', payload=result)
value, timestamp = result.rsplit(sep, 1)
try:
timestamp = bytes_to_int(base64_decode(timestamp))
except Exception:
timestamp = None
# Signature is *not* okay. Raise a proper error now that we have
# split the value and the timestamp.
if sig_error is not None:
raise BadTimeSignature(text_type(sig_error), payload=value,
date_signed=timestamp)
        # Signature was okay but the timestamp is actually not there or
        # malformed. Should not happen, but we handle it nonetheless.
if timestamp is None:
raise BadTimeSignature('Malformed timestamp', payload=value)
# Check timestamp is not older than max_age
if max_age is not None:
age = self.get_timestamp() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age),
payload=value,
date_signed=self.timestamp_to_datetime(timestamp))
if return_timestamp:
return value, self.timestamp_to_datetime(timestamp)
return value
def validate(self, signed_value, max_age=None):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
self.unsign(signed_value, max_age=max_age)
return True
except BadSignature:
return False
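# Illustrative sketch (values assumed): TimestampSigner appends a timestamp
# before signing, so unsign() can enforce a maximum age in seconds.
#
#     ts = TimestampSigner('secret-key')
#     token = ts.sign(b'hello')
#     ts.unsign(token, max_age=5)    # ok while fresh; SignatureExpired later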
class Serializer(object):
"""This class provides a serialization interface on top of the
signer. It provides a similar API to json/pickle and other modules but is
slightly differently structured internally. If you want to change the
underlying implementation for parsing and loading you have to override the
:meth:`load_payload` and :meth:`dump_payload` functions.
This implementation uses simplejson if available for dumping and loading
and will fall back to the standard library's json module if it's not
available.
Starting with 0.14 you do not need to subclass this class in order to
    switch out or customize the :class:`Signer`. You can instead also pass a
    different class to the constructor, as well as a dictionary of keyword
    arguments that should be forwarded::
        s = Serializer('secret-key', signer_kwargs={'key_derivation': 'hmac'})
    .. versionchanged:: 0.14
The `signer` and `signer_kwargs` parameters were added to the
constructor.
"""
#: If a serializer module or class is not passed to the constructor
#: this one is picked up. This currently defaults to :mod:`json`.
default_serializer = json
#: The default :class:`Signer` class that is being used by this
#: serializer.
#:
#: .. versionadded:: 0.14
default_signer = Signer
def __init__(self, secret_key, salt=b'itsdangerous', serializer=None,
signer=None, signer_kwargs=None):
self.secret_key = want_bytes(secret_key)
self.salt = want_bytes(salt)
if serializer is None:
serializer = self.default_serializer
self.serializer = serializer
self.is_text_serializer = is_text_serializer(serializer)
if signer is None:
signer = self.default_signer
self.signer = signer
self.signer_kwargs = signer_kwargs or {}
def load_payload(self, payload, serializer=None):
"""Loads the encoded object. This function raises :class:`BadPayload`
if the payload is not valid. The `serializer` parameter can be used to
override the serializer stored on the class. The encoded payload is
always byte based.
"""
if serializer is None:
serializer = self.serializer
is_text = self.is_text_serializer
else:
is_text = is_text_serializer(serializer)
try:
if is_text:
payload = payload.decode('utf-8')
return serializer.loads(payload)
except Exception as e:
raise BadPayload('Could not load the payload because an '
'exception occurred on unserializing the data',
original_error=e)
def dump_payload(self, obj):
"""Dumps the encoded object. The return value is always a
bytestring. If the internal serializer is text based the value
will automatically be encoded to utf-8.
"""
return want_bytes(self.serializer.dumps(obj))
def make_signer(self, salt=None):
"""A method that creates a new instance of the signer to be used.
The default implementation uses the :class:`Signer` baseclass.
"""
if salt is None:
salt = self.salt
return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
def dumps(self, obj, salt=None):
"""Returns a signed string serialized with the internal serializer.
The return value can be either a byte or unicode string depending
on the format of the internal serializer.
"""
payload = want_bytes(self.dump_payload(obj))
rv = self.make_signer(salt).sign(payload)
if self.is_text_serializer:
rv = rv.decode('utf-8')
return rv
def dump(self, obj, f, salt=None):
"""Like :meth:`dumps` but dumps into a file. The file handle has
to be compatible with what the internal serializer expects.
"""
f.write(self.dumps(obj, salt))
def loads(self, s, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
signature validation fails.
"""
s = want_bytes(s)
return self.load_payload(self.make_signer(salt).unsign(s))
def load(self, f, salt=None):
"""Like :meth:`loads` but loads from a file."""
return self.loads(f.read(), salt)
def loads_unsafe(self, s, salt=None):
"""Like :meth:`loads` but without verifying the signature. This is
potentially very dangerous to use depending on how your serializer
works. The return value is ``(signature_okay, payload)`` instead of
just the payload. The first item will be a boolean that indicates
if the signature is okay (``True``) or if it failed. This function
never fails.
Use it for debugging only and if you know that your serializer module
        is not exploitable (e.g. do not use it with a pickle serializer).
.. versionadded:: 0.15
"""
return self._loads_unsafe_impl(s, salt)
def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
load_payload_kwargs=None):
"""Lowlevel helper function to implement :meth:`loads_unsafe` in
serializer subclasses.
"""
try:
return True, self.loads(s, salt=salt, **(load_kwargs or {}))
except BadSignature as e:
if e.payload is None:
return False, None
try:
return False, self.load_payload(e.payload,
**(load_payload_kwargs or {}))
except BadPayload:
return False, None
def load_unsafe(self, f, *args, **kwargs):
"""Like :meth:`loads_unsafe` but loads from a file.
.. versionadded:: 0.15
"""
return self.loads_unsafe(f.read(), *args, **kwargs)
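# Minimal sketch of the Serializer API (example values assumed):
#
#     s = Serializer('secret-key')
#     data = s.dumps({'id': 5})           # JSON payload + sep + signature
#     assert s.loads(data) == {'id': 5}
#     ok, payload = s.loads_unsafe(data)  # (True, {'id': 5}); never raises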
class TimedSerializer(Serializer):
"""Uses the :class:`TimestampSigner` instead of the default
:meth:`Signer`.
"""
default_signer = TimestampSigner
def loads(self, s, max_age=None, return_timestamp=False, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
signature validation fails. If a `max_age` is provided it will
ensure the signature is not older than that time in seconds. In
case the signature is outdated, :exc:`SignatureExpired` is raised
which is a subclass of :exc:`BadSignature`. All arguments are
forwarded to the signer's :meth:`~TimestampSigner.unsign` method.
"""
base64d, timestamp = self.make_signer(salt) \
.unsign(s, max_age, return_timestamp=True)
payload = self.load_payload(base64d)
if return_timestamp:
return payload, timestamp
return payload
def loads_unsafe(self, s, max_age=None, salt=None):
load_kwargs = {'max_age': max_age}
load_payload_kwargs = {}
return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
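# Sketch (assumed values): TimedSerializer pairs Serializer with
# TimestampSigner, so loads() can reject payloads older than max_age.
#
#     s = TimedSerializer('secret-key')
#     token = s.dumps({'id': 5})
#     s.loads(token, max_age=3600)    # SignatureExpired once token is stale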
class JSONWebSignatureSerializer(Serializer):
"""This serializer implements JSON Web Signature (JWS) support. Only
supports the JWS Compact Serialization.
"""
jws_algorithms = {
'HS256': HMACAlgorithm(hashlib.sha256),
'HS384': HMACAlgorithm(hashlib.sha384),
'HS512': HMACAlgorithm(hashlib.sha512),
'none': NoneAlgorithm(),
}
#: The default algorithm to use for signature generation
default_algorithm = 'HS256'
default_serializer = compact_json
def __init__(self, secret_key, salt=None, serializer=None,
signer=None, signer_kwargs=None, algorithm_name=None):
Serializer.__init__(self, secret_key, salt, serializer,
signer, signer_kwargs)
if algorithm_name is None:
algorithm_name = self.default_algorithm
self.algorithm_name = algorithm_name
self.algorithm = self.make_algorithm(algorithm_name)
def load_payload(self, payload, return_header=False):
payload = want_bytes(payload)
if b'.' not in payload:
raise BadPayload('No "." found in value')
base64d_header, base64d_payload = payload.split(b'.', 1)
try:
json_header = base64_decode(base64d_header)
except Exception as e:
raise BadHeader('Could not base64 decode the header because of '
'an exception', original_error=e)
try:
json_payload = base64_decode(base64d_payload)
except Exception as e:
raise BadPayload('Could not base64 decode the payload because of '
'an exception', original_error=e)
try:
header = Serializer.load_payload(self, json_header,
serializer=json)
except BadData as e:
raise BadHeader('Could not unserialize header because it was '
'malformed', original_error=e)
if not isinstance(header, dict):
raise BadHeader('Header payload is not a JSON object',
header=header)
payload = Serializer.load_payload(self, json_payload)
if return_header:
return payload, header
return payload
def dump_payload(self, header, obj):
base64d_header = base64_encode(self.serializer.dumps(header))
base64d_payload = base64_encode(self.serializer.dumps(obj))
return base64d_header + b'.' + base64d_payload
def make_algorithm(self, algorithm_name):
try:
return self.jws_algorithms[algorithm_name]
except KeyError:
raise NotImplementedError('Algorithm not supported')
def make_signer(self, salt=None, algorithm=None):
if salt is None:
salt = self.salt
key_derivation = 'none' if salt is None else None
if algorithm is None:
algorithm = self.algorithm
return self.signer(self.secret_key, salt=salt, sep='.',
key_derivation=key_derivation, algorithm=algorithm)
def make_header(self, header_fields):
header = header_fields.copy() if header_fields else {}
header['alg'] = self.algorithm_name
return header
def dumps(self, obj, salt=None, header_fields=None):
"""Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It
also allows for specifying additional fields to be included in the JWS
Header.
"""
header = self.make_header(header_fields)
signer = self.make_signer(salt, self.algorithm)
return signer.sign(self.dump_payload(header, obj))
def loads(self, s, salt=None, return_header=False):
"""Reverse of :meth:`dumps`. If requested via `return_header` it will
return a tuple of payload and header.
"""
payload, header = self.load_payload(
self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
return_header=True)
if header.get('alg') != self.algorithm_name:
raise BadHeader('Algorithm mismatch', header=header,
payload=payload)
if return_header:
return payload, header
return payload
def loads_unsafe(self, s, salt=None, return_header=False):
kwargs = {'return_header': return_header}
return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
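# Sketch of the JWS serializer (illustrative; token shown schematically):
#
#     s = JSONWebSignatureSerializer('secret-key', algorithm_name='HS256')
#     token = s.dumps({'x': 42})      # '<header>.<payload>.<signature>'
#     payload, header = s.loads(token, return_header=True)
#     # payload == {'x': 42}, header == {'alg': 'HS256'}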
class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
"""Works like the regular :class:`JSONWebSignatureSerializer` but also
records the time of the signing and can be used to expire signatures.
    JWS currently does not specify this behavior but it mentions a possible
    extension like this in the spec. The expiry date is encoded into the
    header similarly to `draft-ietf-oauth-json-web-token
    <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef>`_.
    The unsign method can raise a :exc:`SignatureExpired` exception if the
unsigning failed because the signature is expired. This exception is a
subclass of :exc:`BadSignature`.
"""
DEFAULT_EXPIRES_IN = 3600
def __init__(self, secret_key, expires_in=None, **kwargs):
JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
if expires_in is None:
expires_in = self.DEFAULT_EXPIRES_IN
self.expires_in = expires_in
def make_header(self, header_fields):
header = JSONWebSignatureSerializer.make_header(self, header_fields)
iat = self.now()
exp = iat + self.expires_in
header['iat'] = iat
header['exp'] = exp
return header
def loads(self, s, salt=None, return_header=False):
payload, header = JSONWebSignatureSerializer.loads(
self, s, salt, return_header=True)
if 'exp' not in header:
raise BadSignature('Missing expiry date', payload=payload)
if not (isinstance(header['exp'], number_types)
and header['exp'] > 0):
raise BadSignature('expiry date is not an IntDate',
payload=payload)
if header['exp'] < self.now():
raise SignatureExpired('Signature expired', payload=payload,
date_signed=self.get_issue_date(header))
if return_header:
return payload, header
return payload
def get_issue_date(self, header):
rv = header.get('iat')
if isinstance(rv, number_types):
return datetime.utcfromtimestamp(int(rv))
def now(self):
return int(time.time())
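# Sketch (assumed values): the timed JWS serializer embeds 'iat' and 'exp'
# in the header and rejects expired tokens on loads().
#
#     s = TimedJSONWebSignatureSerializer('secret-key', expires_in=600)
#     token = s.dumps({'id': 5})
#     s.loads(token)                  # raises SignatureExpired after 600s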
class URLSafeSerializerMixin(object):
"""Mixed in with a regular serializer it will attempt to zlib compress
the string to make it shorter if necessary. It will also base64 encode
the string so that it can safely be placed in a URL.
"""
def load_payload(self, payload):
decompress = False
if payload.startswith(b'.'):
payload = payload[1:]
decompress = True
try:
json = base64_decode(payload)
except Exception as e:
raise BadPayload('Could not base64 decode the payload because of '
'an exception', original_error=e)
if decompress:
try:
json = zlib.decompress(json)
except Exception as e:
                raise BadPayload('Could not zlib decompress the payload '
                    'before decoding it', original_error=e)
return super(URLSafeSerializerMixin, self).load_payload(json)
def dump_payload(self, obj):
json = super(URLSafeSerializerMixin, self).dump_payload(obj)
is_compressed = False
compressed = zlib.compress(json)
if len(compressed) < (len(json) - 1):
json = compressed
is_compressed = True
base64d = base64_encode(json)
if is_compressed:
base64d = b'.' + base64d
return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
"""Works like :class:`Serializer` but dumps and loads into a URL
    safe string consisting of the upper- and lowercase characters of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
default_serializer = compact_json
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
"""Works like :class:`TimedSerializer` but dumps and loads into a URL
    safe string consisting of the upper- and lowercase characters of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
default_serializer = compact_json
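# Sketch: the URL-safe serializers emit tokens using only [A-Za-z0-9_.-],
# suitable for URLs, cookies, or e-mail links (example values assumed):
#
#     s = URLSafeTimedSerializer('secret-key', salt='activate-account')
#     token = s.dumps('user@example.com')
#     s.loads(token, max_age=86400)   # -> 'user@example.com'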
|
sahilTakiar/spark
|
refs/heads/master
|
examples/src/main/python/ml/multilayer_perceptron_classification.py
|
123
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder.appName("multilayer_perceptron_classification_example").getOrCreate()
# $example on$
# Load training data
data = spark.read.format("libsvm")\
.load("data/mllib/sample_multiclass_classification_data.txt")
# Split the data into train and test
splits = data.randomSplit([0.6, 0.4], 1234)
train = splits[0]
test = splits[1]
# specify layers for the neural network:
    # input layer of size 4 (features), two intermediate layers of size 5 and 4
# and output of size 3 (classes)
layers = [4, 5, 4, 3]
# create the trainer and set its parameters
trainer = MultilayerPerceptronClassifier(maxIter=100, layers=layers, blockSize=128, seed=1234)
# train the model
model = trainer.fit(train)
# compute accuracy on the test set
result = model.transform(test)
predictionAndLabels = result.select("prediction", "label")
evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
print("Test set accuracy = " + str(evaluator.evaluate(predictionAndLabels)))
# $example off$
spark.stop()
|
iw3hxn/LibrERP
|
refs/heads/master
|
account_financial_report_webkit/wizard/partner_balance_wizard.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright Camptocamp SA 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class AccountPartnerBalanceWizard(osv.osv_memory):
"""Will launch partner balance report and pass required args"""
_inherit = "account.common.balance.report"
_name = "partner.balance.webkit"
_description = "Partner Balance Report"
_columns = {
'exclude_partner_balance_zero': fields.boolean('Exclude Partner Balance Zero'),
'result_selection': fields.selection([('customer','Receivable Accounts'),
('supplier','Payable Accounts'),
('customer_supplier','Receivable and Payable Accounts')],
"Partner's", required=True),
'partner_ids': fields.many2many('res.partner', string='Filter on partner',
help="Only selected partners will be printed. Leave empty to print all partners."),
}
_defaults = {
'result_selection': 'customer_supplier',
}
def pre_print_report(self, cr, uid, ids, data, context=None):
data = super(AccountPartnerBalanceWizard, self).pre_print_report(cr, uid, ids, data, context)
if context is None:
context = {}
vals = self.read(cr, uid, ids,
['result_selection', 'partner_ids', 'exclude_partner_balance_zero'],
context=context)[0]
data['form'].update(vals)
return data
def _print_report(self, cursor, uid, ids, data, context=None):
context = context or {}
# we update form with display account value
data = self.pre_print_report(cursor, uid, ids, data, context=context)
return {'type': 'ir.actions.report.xml',
'report_name': 'account.account_report_partner_balance_webkit',
'datas': data}
AccountPartnerBalanceWizard()
|
hasadna/django
|
refs/heads/master
|
django/conf/project_template/manage.py
|
1070
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
olt/mapproxy
|
refs/heads/master
|
mapproxy/image/merge.py
|
8
|
# This file is part of the MapProxy project.
# Copyright (C) 2010-2016 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image and tile manipulation (transforming, merging, etc).
"""
from collections import namedtuple
from mapproxy.compat.image import Image, ImageColor, ImageChops, ImageMath
from mapproxy.compat.image import has_alpha_composite_support
from mapproxy.image import BlankImageSource, ImageSource
from mapproxy.image.opts import create_image, ImageOptions
from mapproxy.image.mask import mask_image
import logging
log = logging.getLogger('mapproxy.image')
class LayerMerger(object):
"""
Merge multiple layers into one image.
"""
def __init__(self):
self.layers = []
self.cacheable = True
def add(self, img, coverage=None):
"""
Add one layer image to merge. Bottom-layers first.
"""
if img is not None:
self.layers.append((img, coverage))
def merge(self, image_opts, size=None, bbox=None, bbox_srs=None, coverage=None):
"""
        Merge the layers. A single opaque layer may be returned unchanged.
        :param image_opts: The image options for the result.
        :param size: The size for the merged output.
:rtype: `ImageSource`
"""
if not self.layers:
return BlankImageSource(size=size, image_opts=image_opts, cacheable=True)
if len(self.layers) == 1:
layer_img, layer_coverage = self.layers[0]
layer_opts = layer_img.image_opts
if (((layer_opts and not layer_opts.transparent) or image_opts.transparent)
and (not size or size == layer_img.size)
and (not layer_coverage or not layer_coverage.clip)
and not coverage):
# layer is opaque, no need to make transparent or add bgcolor
return layer_img
if size is None:
size = self.layers[0][0].size
cacheable = self.cacheable
result = create_image(size, image_opts)
for layer_img, layer_coverage in self.layers:
if not layer_img.cacheable:
cacheable = False
img = layer_img.as_image()
layer_image_opts = layer_img.image_opts
if layer_image_opts is None:
opacity = None
else:
opacity = layer_image_opts.opacity
if layer_coverage and layer_coverage.clip:
img = mask_image(img, bbox, bbox_srs, layer_coverage)
if result.mode != 'RGBA':
merge_composite = False
else:
merge_composite = has_alpha_composite_support()
if 'transparency' in img.info:
# non-paletted PNGs can have a fixed transparency value
# convert to RGBA to have full alpha
img = img.convert('RGBA')
if merge_composite:
if opacity is not None and opacity < 1.0:
# fade-out img to add opacity value
img = img.convert("RGBA")
alpha = img.split()[3]
alpha = ImageChops.multiply(
alpha,
ImageChops.constant(alpha, int(255 * opacity))
)
img.putalpha(alpha)
if img.mode in ('RGBA', 'P'):
# assume paletted images have transparency
if img.mode == 'P':
img = img.convert('RGBA')
result = Image.alpha_composite(result, img)
else:
result.paste(img, (0, 0))
else:
if opacity is not None and opacity < 1.0:
img = img.convert(result.mode)
result = Image.blend(result, img, layer_image_opts.opacity)
elif img.mode in ('RGBA', 'P'):
# assume paletted images have transparency
if img.mode == 'P':
img = img.convert('RGBA')
# paste w transparency mask from layer
result.paste(img, (0, 0), img)
else:
result.paste(img, (0, 0))
# apply global clip coverage
if coverage:
bg = create_image(size, image_opts)
mask = mask_image(result, bbox, bbox_srs, coverage)
bg.paste(result, (0, 0), mask)
result = bg
return ImageSource(result, size=size, image_opts=image_opts, cacheable=cacheable)
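# Illustrative sketch of LayerMerger (names assumed; base_img and
# overlay_img would be ImageSource instances):
#
#     merger = LayerMerger()
#     merger.add(base_img)            # bottom layer first
#     merger.add(overlay_img)         # drawn on top
#     result = merger.merge(ImageOptions(transparent=True), size=(256, 256))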
band_ops = namedtuple("band_ops", ["dst_band", "src_img", "src_band", "factor"])
class BandMerger(object):
"""
Merge bands from multiple sources into one image.
sources:
r: [{source: nir_cache, band: 0, factor: 0.4}, {source: dop_cache, band: 0, factor: 0.6}]
g: [{source: dop_cache, band: 2}]
b: [{source: dop_cache, band: 1}]
sources:
l: [
{source: dop_cache, band: 0, factor: 0.6},
{source: dop_cache, band: 1, factor: 0.3},
{source: dop_cache, band: 2, factor: 0.1},
]
"""
def __init__(self, mode=None):
self.ops = []
self.cacheable = True
self.mode = mode
self.max_band = {}
self.max_src_images = 0
def add_ops(self, dst_band, src_img, src_band, factor=1.0):
self.ops.append(band_ops(
dst_band=dst_band,
src_img=src_img,
src_band=src_band,
factor=factor,
))
# store highest requested band index for each source
self.max_band[src_img] = max(self.max_band.get(src_img, 0), src_band)
self.max_src_images = max(src_img+1, self.max_src_images)
def merge(self, sources, image_opts, size=None, bbox=None, bbox_srs=None, coverage=None):
if len(sources) < self.max_src_images:
return BlankImageSource(size=size, image_opts=image_opts, cacheable=True)
if size is None:
size = sources[0].size
# load src bands
src_img_bands = []
for i, layer_img in enumerate(sources):
img = layer_img.as_image()
if i not in self.max_band:
# do not split img if not requested by any op
src_img_bands.append(None)
continue
if self.max_band[i] == 3 and img.mode != 'RGBA':
                # convert to RGBA if band idx 3 is requested (e.g. P or RGB src)
img = img.convert('RGBA')
elif img.mode == 'P':
img = img.convert('RGB')
src_img_bands.append(img.split())
tmp_mode = self.mode
if tmp_mode == 'RGBA':
result_bands = [None, None, None, None]
elif tmp_mode == 'RGB':
result_bands = [None, None, None]
elif tmp_mode == 'L':
result_bands = [None]
else:
raise ValueError("unsupported destination mode %s", image_opts.mode)
for op in self.ops:
chan = src_img_bands[op.src_img][op.src_band]
            if op.factor != 1.0:
                chan = ImageMath.eval("convert(int(float(a) * %f), 'L')" % op.factor, a=chan)
                if result_bands[op.dst_band] is None:
                    result_bands[op.dst_band] = chan
                else:
                    result_bands[op.dst_band] = ImageChops.add(
                        result_bands[op.dst_band],
                        chan,
                    )
            else:
                result_bands[op.dst_band] = chan
for i, b in enumerate(result_bands):
if b is None:
# band not set
b = Image.new("L", size, 255 if i == 3 else 0)
result_bands[i] = b
result = Image.merge(tmp_mode, result_bands)
return ImageSource(result, size=size, image_opts=image_opts)
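# Sketch of BandMerger (assumed example): derive a grayscale output from a
# weighted mix of the RGB bands of one source ImageSource `src`.
#
#     bm = BandMerger(mode='L')
#     bm.add_ops(dst_band=0, src_img=0, src_band=0, factor=0.3)
#     bm.add_ops(dst_band=0, src_img=0, src_band=1, factor=0.6)
#     bm.add_ops(dst_band=0, src_img=0, src_band=2, factor=0.1)
#     result = bm.merge([src], image_opts=ImageOptions(mode='L'))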
def merge_images(layers, image_opts, size=None, bbox=None, bbox_srs=None, merger=None):
"""
Merge multiple images into one.
:param images: list of `ImageSource`, bottom image first
:param format: the format of the output `ImageSource`
:param size: size of the merged image, if ``None`` the size
of the first image is used
:param bbox: Bounding box
:param bbox_srs: Bounding box SRS
:param merger: Image merger
:rtype: `ImageSource`
"""
if merger is None:
merger = LayerMerger()
# BandMerger does not have coverage support, passing only images
if isinstance(merger, BandMerger):
sources = [l[0] if isinstance(l, tuple) else l for l in layers]
return merger.merge(sources, image_opts=image_opts, size=size, bbox=bbox, bbox_srs=bbox_srs)
for layer in layers:
if isinstance(layer, tuple):
merger.add(layer[0], layer[1])
else:
merger.add(layer)
return merger.merge(image_opts=image_opts, size=size, bbox=bbox, bbox_srs=bbox_srs)
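# Hypothetical call (layers, bbox and srs assumed): merge two ImageSources,
# clipping the second one to a coverage.
#
#     result = merge_images([img1, (img2, clip_coverage)],
#                           ImageOptions(transparent=True),
#                           size=(256, 256), bbox=bbox, bbox_srs=srs)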
def concat_legends(legends, format='png', size=None, bgcolor='#ffffff', transparent=True):
"""
Merge multiple legends into one
:param images: list of `ImageSource`, bottom image first
:param format: the format of the output `ImageSource`
:param size: size of the merged image, if ``None`` the size
will be calculated
:rtype: `ImageSource`
"""
if not legends:
return BlankImageSource(size=(1,1), image_opts=ImageOptions(bgcolor=bgcolor, transparent=transparent))
if len(legends) == 1:
return legends[0]
legends = legends[:]
legends.reverse()
if size is None:
legend_width = 0
legend_height = 0
legend_position_y = []
        # iterate through all legends, last to first, calculate image size and remember the y-position
for legend in legends:
legend_position_y.append(legend_height)
tmp_img = legend.as_image()
legend_width = max(legend_width, tmp_img.size[0])
            legend_height += tmp_img.size[1]  # images shall not overlap themselves
size = [legend_width, legend_height]
bgcolor = ImageColor.getrgb(bgcolor)
if transparent:
img = Image.new('RGBA', size, bgcolor+(0,))
else:
img = Image.new('RGB', size, bgcolor)
for i in range(len(legends)):
legend_img = legends[i].as_image()
if legend_img.mode == 'RGBA':
# paste w transparency mask from layer
img.paste(legend_img, (0, legend_position_y[i]), legend_img)
else:
img.paste(legend_img, (0, legend_position_y[i]))
return ImageSource(img, image_opts=ImageOptions(format=format))
|
amir-qayyum-khan/lore
|
refs/heads/master
|
learningresources/migrations/0009_allow_blank_description.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# pylint: skip-file
class Migration(migrations.Migration):
dependencies = [
('learningresources', '0008_remove_staticasset_learning_resources'),
]
operations = [
migrations.AlterField(
model_name='learningresource',
name='description',
field=models.TextField(blank=True),
),
]
|
flavio-casacurta/Nat2Py
|
refs/heads/master
|
Adabas/demo/Emptel/string25.py
|
1
|
import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
"""A string class for supporting $-substitutions."""
__metaclass__ = _TemplateMetaclass
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _multimap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _multimap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named')
if named is not None:
try:
# We use this idiom instead of str() because the latter
# will fail if val is a Unicode containing non-ASCII
return '%s' % (mapping[named],)
except KeyError:
return self.delimiter + named
braced = mo.group('braced')
if braced is not None:
try:
return '%s' % (mapping[braced],)
except KeyError:
return self.delimiter + '{' + braced + '}'
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return self.delimiter
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
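# Illustrative use of the Template class above (example values assumed):
#
#     t = Template('Hello $name, balance: ${amount}')
#     t.substitute(name='Ann', amount='5')   # 'Hello Ann, balance: 5'
#     t.safe_substitute(name='Ann')          # leaves ${amount} in place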
|
thaim/ansible
|
refs/heads/fix-broken-link
|
lib/ansible/modules/cloud/vmware/vmware_host_active_directory.py
|
21
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_active_directory
short_description: Joins an ESXi host system to an Active Directory domain or leaves it
description:
- This module can be used to join or leave an ESXi host to or from an Active Directory domain.
version_added: 2.8
author:
- Christian Kotte (@ckotte)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
ad_domain:
description:
- AD Domain to join.
type: str
aliases: [ domain, domain_name ]
ad_user:
description:
- Username for AD domain join.
type: str
ad_password:
description:
- Password for AD domain join.
type: str
ad_state:
description:
- Whether the ESXi host is joined to an AD domain or not.
type: str
choices: [ present, absent ]
default: 'absent'
aliases: [ state ]
esxi_hostname:
description:
- Name of the host system to work with.
- This parameter is required if C(cluster_name) is not specified.
type: str
cluster_name:
description:
- Name of the cluster from which all host systems will be used.
- This parameter is required if C(esxi_hostname) is not specified.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Join an AD domain
vmware_host_active_directory:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
ad_domain: example.local
ad_user: adjoin
ad_password: Password123$
ad_state: present
validate_certs: no
delegate_to: localhost
- name: Leave AD domain
vmware_host_active_directory:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
ad_state: absent
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
results:
description: metadata about host system's AD domain join state
returned: always
type: dict
sample: {
"esxi01": {
"changed": true,
"domain": "example.local",
"membership_state": "ok",
"msg": "Host joined to AD domain",
"ad_state": "present",
"ad_state_current": "present",
"ad_state_previous": "absent",
},
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, TaskError, vmware_argument_spec, wait_for_task
from ansible.module_utils._text import to_native
class VmwareHostAdAuthentication(PyVmomi):
"""Manage Active Directory Authentication for an ESXi host system"""
def __init__(self, module):
super(VmwareHostAdAuthentication, self).__init__(module)
cluster_name = self.params.get('cluster_name')
esxi_host_name = self.params.get('esxi_hostname')
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
if not self.hosts:
self.module.fail_json(msg="Failed to find host system.")
def ensure(self):
"""Manage Active Directory Authentication for an ESXi host system"""
results = dict(changed=False, result=dict())
desired_state = self.params.get('ad_state')
domain = self.params.get('ad_domain')
ad_user = self.params.get('ad_user')
ad_password = self.params.get('ad_password')
host_change_list = []
for host in self.hosts:
changed = False
results['result'][host.name] = dict(msg='')
active_directory_info = self.get_ad_info(host)
results['result'][host.name]['ad_state'] = desired_state
results['result'][host.name]['ad_domain'] = domain
if desired_state == 'present':
# Don't do anything if already enabled and joined
if active_directory_info.enabled:
# Joined and no problems with the domain membership
if active_directory_info.domainMembershipStatus == 'ok':
results['result'][host.name]['changed'] = False
results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus
results['result'][host.name]['joined_domain'] = active_directory_info.joinedDomain
results['result'][host.name]['trusted_domains'] = active_directory_info.trustedDomain
results['result'][host.name]['msg'] = (
"Host is joined to AD domain and there are no problems with the domain membership"
)
# Joined, but problems with the domain membership
else:
changed = results['result'][host.name]['changed'] = True
results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus
results['result'][host.name]['joined_domain'] = active_directory_info.joinedDomain
results['result'][host.name]['trusted_domains'] = active_directory_info.trustedDomain
msg = "Host is joined to AD domain, but "
if active_directory_info.domainMembershipStatus == 'clientTrustBroken':
msg += "the client side of the trust relationship is broken"
elif active_directory_info.domainMembershipStatus == 'inconsistentTrust':
msg += "unexpected domain controller responded"
elif active_directory_info.domainMembershipStatus == 'noServers':
msg += "the host thinks it's part of a domain and " \
"no domain controllers could be reached to confirm"
elif active_directory_info.domainMembershipStatus == 'serverTrustBroken':
msg += "the server side of the trust relationship is broken (or bad machine password)"
elif active_directory_info.domainMembershipStatus == 'otherProblem':
msg += "there are some problems with the domain membership"
elif active_directory_info.domainMembershipStatus == 'unknown':
msg += "the Active Directory integration provider does not support domain trust checks"
results['result'][host.name]['msg'] = msg
# Enable and join AD domain
else:
if self.module.check_mode:
changed = results['result'][host.name]['changed'] = True
results['result'][host.name]['ad_state_previous'] = "absent"
results['result'][host.name]['ad_state_current'] = "present"
results['result'][host.name]['msg'] = "Host would be joined to AD domain"
else:
ad_authentication = self.get_ad_auth_object(host)
try:
try:
task = ad_authentication.JoinDomain(
domainName=domain, userName=ad_user, password=ad_password
)
wait_for_task(task)
except TaskError as task_err:
self.module.fail_json(
msg="Failed to join domain : %s" % to_native(task_err)
)
changed = results['result'][host.name]['changed'] = True
results['result'][host.name]['ad_state_previous'] = "absent"
results['result'][host.name]['ad_state_current'] = "present"
results['result'][host.name]['msg'] = "Host joined to AD domain"
active_directory_info = self.get_ad_info(host)
results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(
msg="The host has already joined a domain : %s" % to_native(invalid_state.msg)
)
except vim.fault.HostConfigFault as host_fault:
self.module.fail_json(
msg="The host configuration prevents the join operation from succeeding : %s" %
to_native(host_fault.msg)
)
except vim.fault.InvalidLogin as invalid_login:
self.module.fail_json(
msg="Credentials aren't valid : %s" % to_native(invalid_login.msg)
)
except vim.fault.TaskInProgress as task_in_progress:
self.module.fail_json(
msg="The ActiveDirectoryAuthentication object is busy : %s" %
to_native(task_in_progress.msg)
)
except vim.fault.BlockedByFirewall as blocked_by_firewall:
self.module.fail_json(
msg="Ports needed by the join operation are blocked by the firewall : %s" %
to_native(blocked_by_firewall.msg)
)
except vim.fault.DomainNotFound as not_found:
self.module.fail_json(
msg="The domain controller can't be reached : %s" % to_native(not_found.msg)
)
except vim.fault.NoPermissionOnAD as no_permission:
self.module.fail_json(
msg="The specified user has no right to add hosts to the domain : %s" %
to_native(no_permission.msg)
)
except vim.fault.InvalidHostName as invalid_host:
self.module.fail_json(
msg="The domain part of the host's FQDN doesn't match the domain being joined : %s" %
to_native(invalid_host.msg)
)
except vim.fault.ClockSkew as clock_skew:
self.module.fail_json(
msg="The clocks of the host and the domain controller differ by more "
"than the allowed amount of time : %s" % to_native(clock_skew.msg)
)
except vim.fault.ActiveDirectoryFault as ad_fault:
self.module.fail_json(
msg="An error occurred during AD join : %s" %
to_native(ad_fault.msg)
)
elif desired_state == 'absent':
# Don't do anything not joined to any AD domain
if not active_directory_info.enabled:
results['result'][host.name]['changed'] = False
results['result'][host.name]['ad_state_current'] = "absent"
results['result'][host.name]['msg'] = "Host isn't joined to an AD domain"
# Disable and leave AD domain
else:
if self.module.check_mode:
changed = results['result'][host.name]['changed'] = True
results['result'][host.name]['ad_state_previous'] = "present"
results['result'][host.name]['ad_state_current'] = "absent"
results['result'][host.name]['msg'] = "Host would leave the AD domain '%s'" % \
active_directory_info.joinedDomain
else:
ad_authentication = self.get_ad_auth_object(host)
try:
try:
task = ad_authentication.LeaveCurrentDomain(force=True)
wait_for_task(task)
except TaskError as task_err:
self.module.fail_json(
msg="Failed to join domain : %s" % to_native(task_err)
)
changed = results['result'][host.name]['changed'] = True
results['result'][host.name]['ad_state_previous'] = "present"
results['result'][host.name]['ad_state_current'] = "absent"
results['result'][host.name]['msg'] = "Host left the AD domain '%s'" % \
active_directory_info.joinedDomain
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(
msg="The host is not in a domain or there are active permissions for "
"Active Directory users : %s" % to_native(invalid_state.msg)
)
except vim.fault.AuthMinimumAdminPermission as admin_permission:
self.module.fail_json(
msg="This change would leave the system with no Administrator permission "
"on the root node : %s" % to_native(admin_permission.msg)
)
except vim.fault.TaskInProgress as task_in_progress:
self.module.fail_json(
msg="The ActiveDirectoryAuthentication object is busy : %s" %
to_native(task_in_progress.msg)
)
except vim.fault.NonADUserRequired as non_ad_user:
self.module.fail_json(
msg="Only non Active Directory users can initiate the leave domain operation : %s" %
to_native(non_ad_user.msg)
)
except vim.fault.ActiveDirectoryFault as ad_fault:
self.module.fail_json(
msg="An error occurred during AD leave : %s" %
to_native(ad_fault.msg)
)
host_change_list.append(changed)
if any(host_change_list):
results['changed'] = True
self.module.exit_json(**results)
def get_ad_info(self, host_object):
"""Get info about AD membership"""
active_directory_info = None
authentication_store_info = host_object.config.authenticationManagerInfo.authConfig
for authentication_info in authentication_store_info:
if isinstance(authentication_info, vim.host.ActiveDirectoryInfo):
active_directory_info = authentication_info
break
if not active_directory_info:
self.module.fail_json(
msg="Failed to get Active Directory info from authentication manager"
)
return active_directory_info
def get_ad_auth_object(self, host_object):
"""Get AD authentication managed object"""
ad_authentication = None
authentication_store_info = host_object.configManager.authenticationManager.supportedStore
for store_info in authentication_store_info:
if isinstance(store_info, vim.host.ActiveDirectoryAuthentication):
ad_authentication = store_info
break
if not ad_authentication:
self.module.fail_json(
msg="Failed to get Active Directory authentication managed object from authentication manager"
)
return ad_authentication
def main():
"""Main"""
argument_spec = vmware_argument_spec()
argument_spec.update(
ad_domain=dict(type='str', default='', aliases=['domain', 'domain_name']),
ad_user=dict(type='str', default=''),
ad_password=dict(type='str', default='', no_log=True),
ad_state=dict(default='absent', choices=['present', 'absent'], aliases=['state']),
esxi_hostname=dict(type='str', required=False),
cluster_name=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
required_if=[
['ad_state', 'present', ['ad_domain', 'ad_user', 'ad_password']],
],
supports_check_mode=True
)
ad_auth = VmwareHostAdAuthentication(module)
ad_auth.ensure()
if __name__ == '__main__':
main()
|
jacobian-archive/openstack.compute
|
refs/heads/master
|
tests/utils.py
|
3
|
from nose.tools import ok_
def fail(msg):
raise AssertionError(msg)
def assert_in(thing, seq, msg=None):
msg = msg or "'%s' not found in %s" % (thing, seq)
ok_(thing in seq, msg)
def assert_not_in(thing, seq, msg=None):
msg = msg or "unexpected '%s' found in %s" % (thing, seq)
ok_(thing not in seq, msg)
def assert_has_keys(dct, required=(), optional=()):
    keys = dct.keys()
    for k in required:
        assert_in(k, keys, "required key %s missing from %s" % (k, dct))
    allowed_keys = set(required) | set(optional)
    extra_keys = set(keys) - allowed_keys
    if extra_keys:
        fail("found unexpected keys: %s" % list(extra_keys))
def assert_isinstance(thing, kls):
ok_(isinstance(thing, kls), "%s is not an instance of %s" % (thing, kls))
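# Hypothetical use of the helpers above in a test:
#
#     assert_has_keys({'id': 1, 'name': 'x'}, required=['id'], optional=['name'])
#     assert_isinstance([], list)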
|
aristanetworks/neutron
|
refs/heads/master
|
neutron/plugins/ml2/drivers/freescale/config.py
|
48
|
# Copyright (c) 2014 Freescale Semiconductor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutronclient.v2_0 import client
from oslo_config import cfg
# assumed import: '_' (the i18n marker) is used in the help strings below
# but was not imported in this fragment
from neutron.i18n import _
# Freescale CRD Server Configuration used by ML2 Mechanism Driver.
#
# The following configuration is used by Freescale drivers/plugins
# (e.g. the FWaaS and VPNaaS plugins) which connect to the Cloud Resource
# Discovery (CRD) service.
# CRD service options required for FSL SDN OS Mech Driver
ml2_fslsdn_opts = [
cfg.StrOpt('crd_user_name', default='crd',
help=_("CRD service Username.")),
cfg.StrOpt('crd_password', default='password',
secret=True,
help=_("CRD Service Password.")),
cfg.StrOpt('crd_tenant_name', default='service',
help=_("CRD Tenant Name.")),
cfg.StrOpt('crd_auth_url',
default='http://127.0.0.1:5000/v2.0/',
help=_("CRD Auth URL.")),
cfg.StrOpt('crd_url',
default='http://127.0.0.1:9797',
help=_("URL for connecting to CRD service.")),
cfg.IntOpt('crd_url_timeout',
default=30,
help=_("Timeout value for connecting to "
"CRD service in seconds.")),
cfg.StrOpt('crd_region_name',
default='RegionOne',
help=_("Region name for connecting to "
"CRD Service in admin context.")),
cfg.BoolOpt('crd_api_insecure',
default=False,
help=_("If set, ignore any SSL validation issues.")),
cfg.StrOpt('crd_auth_strategy',
default='keystone',
help=_("Auth strategy for connecting to "
"neutron in admin context.")),
cfg.StrOpt('crd_ca_certificates_file',
help=_("Location of ca certificates file to use for "
"CRD client requests.")),
]
# Register the configuration option for crd service
cfg.CONF.register_opts(ml2_fslsdn_opts, "ml2_fslsdn")
# shortcut
FSLCONF = cfg.CONF.ml2_fslsdn
SERVICE_TYPE = 'crd'
def get_crdclient():
"""Using the CRD configuration, get and return CRD Client instance."""
crd_client_params = {
'username': FSLCONF.crd_user_name,
'tenant_name': FSLCONF.crd_tenant_name,
'region_name': FSLCONF.crd_region_name,
'password': FSLCONF.crd_password,
'auth_url': FSLCONF.crd_auth_url,
'auth_strategy': FSLCONF.crd_auth_strategy,
'endpoint_url': FSLCONF.crd_url,
'timeout': FSLCONF.crd_url_timeout,
'insecure': FSLCONF.crd_api_insecure,
'service_type': SERVICE_TYPE,
'ca_cert': FSLCONF.crd_ca_certificates_file,
}
return client.Client(**crd_client_params)
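# Hedged usage sketch (comments only, not part of the original file): once
# the [ml2_fslsdn] section of neutron.conf is loaded, a driver can reach CRD
# through the standard neutronclient v2.0 API, e.g.
#
#   crd = get_crdclient()
#   crd.list_networks()  # stock neutronclient call; whether the CRD
#                        # endpoint supports it depends on the deployment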
|
jjwright55/code_fragments
|
refs/heads/master
|
frags/python/slice/ChannelXML.py
|
1
|
#!/tps/bin/python
import string
from xml.sax import saxutils
from xml.sax import make_parser
from xml.sax.handler import feature_namespaces
def normalize(text):
return string.join(string.split(text), ' ')
def countTuple(tupe):
    return len(tupe)
class Channel:
def __init__(self, id, title, type, subsystem, source):
self.id = id
self.title = title
self.type = type
self.subsystem = subsystem
self.source = source
def getId(self): return self.id
def getTitle(self): return self.title
def getSubsystem(self): return self.subsystem
def getSource(self): return self.source
class ChannelXML(saxutils.DefaultHandler):
def __init__(self):
self.inText = 0
self.Text = ""
self.chanlist = []
def startElement(self, name, attr):
self.inText = 1
self.Text = ""
if name == "channel":
cmd = Channel(attr.get('id'), attr.get('title'),
attr.get('type'), attr.get('subsystem'),
attr.get('source'))
self.chanlist.append(cmd)
def characters(self, ch):
if self.inText:
if ch != '\n':
self.Text = self.Text + ch
def endElement(self, name):
self.inText = 0
self.Text = normalize(self.Text)
def loadXML(self, fname):
parser = make_parser()
parser.setFeature(feature_namespaces, 0)
parser.setContentHandler(self)
parser.parse(fname)
def getchanlist(self):
return self.chanlist
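# Minimal usage sketch (comments only, not part of the original fragment);
# 'channels.xml' is a hypothetical input file:
#
#   handler = ChannelXML()
#   handler.loadXML('channels.xml')
#   for chan in handler.getchanlist():
#       print chan.getId(), chan.getTitle()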
|
a-doumoulakis/tensorflow
|
refs/heads/master
|
tensorflow/contrib/losses/python/losses/loss_ops.py
|
20
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.deprecation import deprecated
__all__ = ["absolute_difference",
"add_loss",
"cosine_distance",
"compute_weighted_loss",
"get_losses",
"get_regularization_losses",
"get_total_loss",
"hinge_loss",
"log_loss",
"mean_pairwise_squared_error",
"mean_squared_error",
"sigmoid_cross_entropy",
"softmax_cross_entropy",
"sparse_softmax_cross_entropy"]
def _scale_losses(losses, weights):
"""Computes the scaled loss.
Args:
losses: A `Tensor` of size [batch_size, d1, ... dN].
weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
      The `losses` are reduced (tf.reduce_sum) until their rank matches
      that of `weights`, at which point the reduced `losses` are element-wise
      multiplied by `weights` and a final reduce_sum is computed on the result.
Conceptually, this operation is equivalent to broadcasting (tiling)
`weights` to be the same size as `losses`, performing an element-wise
multiplication, and summing the result.
Returns:
A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`.
"""
# First, compute the sum of the losses over all elements:
start_index = max(0, weights.get_shape().ndims)
reduction_indices = list(range(start_index, losses.get_shape().ndims))
reduced_losses = math_ops.reduce_sum(losses,
reduction_indices=reduction_indices)
reduced_losses = math_ops.multiply(reduced_losses, weights)
return math_ops.reduce_sum(reduced_losses)
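# Worked example (comments only, not part of the original source): for
# `losses` of shape [2, 3] and `weights` of shape [2], dimension 1 is
# reduced first, then the per-sample sums are weighted and summed:
#   losses = [[1, 1, 1], [2, 2, 2]], weights = [1, 0]
#   reduce_sum over dim 1 -> [3, 6]; [3, 6] * [1, 0] -> [3, 0]; sum -> 3.0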
def _safe_div(numerator, denominator, name="value"):
"""Computes a safe divide which returns 0 if the denominator is zero.
Note that the function contains an additional conditional check that is
necessary for avoiding situations where the loss is zero causing NaNs to
creep into the gradient computation.
Args:
numerator: An arbitrary `Tensor`.
denominator: A `Tensor` whose shape matches `numerator` and whose values are
assumed to be non-negative.
name: An optional name for the returned op.
Returns:
The element-wise value of the numerator divided by the denominator.
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.div(numerator, array_ops.where(
math_ops.equal(denominator, 0),
array_ops.ones_like(denominator), denominator)),
array_ops.zeros_like(numerator),
name=name)
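# Behavior sketch (comments only, not part of the original source):
#   _safe_div([4., 9.], [2., 0.]) -> [2., 0.]
# Positions with a zero denominator yield 0 instead of inf/NaN, and the
# inner where() substitutes ones so no actual division by zero reaches the
# gradient computation.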
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: A tensor whose elements contain individual loss measurements.
num_present: The number of measurable losses in the tensor.
Returns:
A scalar representing the mean of the losses. If `num_present` is zero,
then zero is returned.
"""
total_loss = math_ops.reduce_sum(losses)
return _safe_div(total_loss, num_present)
@deprecated("2016-12-30", "Use tf.losses.compute_weighted_loss instead.")
def compute_weighted_loss(losses, weights=1.0, scope=None):
"""Computes the weighted loss.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` that returns the weighted loss.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
`losses`, or if the number of dimensions (rank) of either `losses` or
`weights` is missing.
"""
with ops.name_scope(scope, "weighted_loss", [losses, weights]):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.to_float(losses)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
if losses.get_shape().ndims is None:
raise ValueError("losses.get_shape().ndims cannot be None")
weights_shape = weights.get_shape()
if weights_shape.ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
weights = array_ops.squeeze(weights, [-1])
total_loss = _scale_losses(losses, weights)
num_present = _num_present(losses, weights)
mean_loss = _safe_mean(total_loss, num_present)
# convert the result back to the input type
mean_loss = math_ops.cast(mean_loss, input_dtype)
add_loss(mean_loss)
return mean_loss
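# Hedged usage sketch (comments only, not part of the original source);
# the numbers are made up:
#   losses  = tf.constant([1.0, 2.0, 3.0, 4.0])
#   weights = tf.constant([1.0, 1.0, 0.0, 0.0])
#   compute_weighted_loss(losses, weights)  # mean over the two present
#                                           # elements: (1.0 + 2.0) / 2 = 1.5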
def _num_present(losses, weights, per_batch=False):
"""Computes the number of elements in the loss function induced by `weights`.
A given weights tensor induces different numbers of usable elements in the
`losses` tensor. The `weights` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
[4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
effect, tiled to match the size of `losses`. Following this effective tile,
the total number of present elements is the number of non-zero weights.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is True, the value is returned as a tensor of size
[batch_size]. Otherwise, a single scalar tensor is returned.
"""
  # If weights is a scalar, it's easy to compute:
if weights.get_shape().ndims == 0:
batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
[0], [1]), [])
num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
math_ops.to_float(batch_size))
num_per_batch = array_ops.where(math_ops.equal(weights, 0),
0.0, num_per_batch)
num_per_batch = math_ops.multiply(array_ops.ones(
array_ops.reshape(batch_size, [1])), num_per_batch)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
# First, count the number of nonzero weights:
if weights.get_shape().ndims >= 1:
reduction_indices = list(range(1, weights.get_shape().ndims))
num_nonzero_per_batch = math_ops.reduce_sum(
math_ops.to_float(math_ops.not_equal(weights, 0)),
reduction_indices=reduction_indices)
# Next, determine the number of elements that weights would broadcast to:
broadcast_dims = array_ops.slice(array_ops.shape(losses),
[weights.get_shape().ndims], [-1])
num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))
num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
@deprecated("2016-12-30", "Use tf.losses.add_loss instead.")
@add_arg_scope
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
"""Adds a externally defined loss to the collection of losses.
Args:
loss: A loss `Tensor`.
loss_collection: Optional collection to add the loss to.
"""
if loss_collection:
ops.add_to_collection(loss_collection, loss)
@deprecated("2016-12-30", "Use tf.losses.get_losses instead.")
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
"""Gets the list of losses from the loss_collection.
Args:
scope: an optional scope for filtering the losses to return.
loss_collection: Optional losses collection.
Returns:
a list of loss tensors.
"""
return ops.get_collection(loss_collection, scope)
@deprecated("2016-12-30", "Use tf.losses.get_regularization_losses instead.")
def get_regularization_losses(scope=None):
"""Gets the regularization losses.
Args:
scope: an optional scope for filtering the losses to return.
Returns:
A list of regularization losses as Tensors.
"""
return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
@deprecated("2016-12-30", "Use tf.losses.get_total_loss instead.")
def get_total_loss(add_regularization_losses=True, name="total_loss"):
"""Returns a tensor whose value represents the total loss.
Notice that the function adds the given losses to the regularization losses.
Args:
add_regularization_losses: A boolean indicating whether or not to use the
regularization losses in the sum.
name: The name of the returned tensor.
Returns:
A `Tensor` whose value represents the total loss.
Raises:
ValueError: if `losses` is not iterable.
"""
losses = get_losses()
if add_regularization_losses:
losses += get_regularization_losses()
return math_ops.add_n(losses, name=name)
@deprecated("2016-12-30", "Use tf.losses.absolute_difference instead.")
def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
"""Adds an Absolute Difference loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "absolute_difference",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.sigmoid_cross_entropy instead. Note that the order "
"of the predictions and labels arguments has been changed.")
def sigmoid_cross_entropy(
logits, multi_class_labels, weights=1.0, label_smoothing=0, scope=None):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
    logits: [batch_size, num_classes] logits outputs of the network.
multi_class_labels: [batch_size, num_classes] labels in (0, 1).
weights: Coefficients for the loss. The tensor must be a scalar, a tensor of
shape [batch_size] or shape [batch_size, num_classes].
label_smoothing: If greater than 0 then smooth the labels.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None.
"""
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
[logits, multi_class_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
if label_smoothing > 0:
multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.softmax_cross_entropy instead. Note that the order "
"of the logits and labels arguments has been changed.")
def softmax_cross_entropy(
logits, onehot_labels, weights=1.0, label_smoothing=0, scope=None):
"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Args:
    logits: [batch_size, num_classes] logits outputs of the network.
onehot_labels: [batch_size, num_classes] one-hot-encoded labels.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size].
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weights` is invalid or if `weights` is None.
"""
with ops.name_scope(scope, "softmax_cross_entropy_loss",
[logits, onehot_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
losses = nn.softmax_cross_entropy_with_logits(labels=onehot_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
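# Worked smoothing example (comments only, not part of the original source):
# with label_smoothing=0.1 and num_classes=2, a one-hot row [1, 0] becomes
# [1 * 0.9 + 0.05, 0 * 0.9 + 0.05] = [0.95, 0.05].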
@deprecated("2016-12-30",
"Use tf.losses.sparse_softmax_cross_entropy instead. Note that "
"the order of the logits and labels arguments has been changed.")
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
Args:
    logits: [batch_size, num_classes] logits outputs of the network.
labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64`
in the range `[0, num_classes)`.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size] or [batch_size, 1].
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of `logits`, `labels`, and `weights` are
incompatible, or if `weights` is None.
"""
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
[logits, labels, weights]) as scope:
labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.log_loss instead. Note that the order of the "
"predictions and labels arguments has been changed.")
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
"""Adds a Log Loss term to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "log_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = -math_ops.multiply(
labels,
math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.hinge_loss instead. Note that the order of the "
"logits and labels arguments has been changed, and to stay "
"unweighted, reduction=Reduction.NONE")
def hinge_loss(logits, labels=None, scope=None):
"""Method that returns the loss tensor for hinge loss.
Args:
logits: The logits, a float tensor.
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0.
scope: The scope for the operations performed in computing the loss.
Returns:
An unweighted `Tensor` of same shape as `logits` and `labels` representing the
loss values across the batch.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match.
"""
with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
labels = math_ops.to_float(labels)
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
return nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
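# Worked example (comments only, not part of the original source): binary
# labels [0., 1.] become [-1., 1.]; with logits [0.5, 0.3] the element-wise
# loss is relu(1 - (-1)*0.5) = 1.5 and relu(1 - 1*0.3) = 0.7.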
@deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.")
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.square(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.mean_pairwise_squared_error instead. Note that the "
"order of the predictions and labels arguments has been changed.")
def mean_pairwise_squared_error(
predictions, labels=None, weights=1.0, scope=None):
"""Adds a pairwise-errors-squared loss to the training procedure.
Unlike `mean_squared_error`, which is a measure of the differences between
corresponding elements of `predictions` and `labels`,
`mean_pairwise_squared_error` is a measure of the differences between pairs of
corresponding elements of `predictions` and `labels`.
For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences, which are summed to compute the loss:
loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
Note that since the inputs are of size [batch_size, d0, ... dN], the
corresponding pairs are computed within each batch sample but not across
samples within a batch. For example, if `predictions` represents a batch of
16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
is drawn from each image, but not across images.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector.
Args:
predictions: The predicted outputs, a tensor of size [batch_size, d0, .. dN]
where N+1 is the total number of dimensions in `predictions`.
labels: The ground truth output tensor, whose shape must match the shape of
the `predictions` tensor.
    weights: Coefficients for the loss: a scalar, a tensor of shape [batch_size]
or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_pairwise_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
diffs = math_ops.subtract(predictions, labels)
# Need to verify here since the function doesn't use compute_weighted_loss
if diffs.get_shape().ndims is None:
raise ValueError("diffs.get_shape().ndims cannot be None")
if weights.get_shape().ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
reduction_indices = list(range(1, diffs.get_shape().ndims))
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs),
reduction_indices=reduction_indices)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
num_present_per_batch)
sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
term2 = 2.0 * _safe_div(math_ops.square(sum_diff),
math_ops.square(num_present_per_batch))
loss = _scale_losses(term1 - term2, weights)
mean_loss = array_ops.where(math_ops.reduce_sum(num_present_per_batch) > 0,
loss,
array_ops.zeros_like(loss),
name="value")
add_loss(mean_loss)
return mean_loss
@deprecated("2016-12-30", "Use tf.losses.cosine_distance instead.")
def cosine_distance(
predictions, labels=None, dim=None, weights=1.0, scope=None):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
predictions: An arbitrary matrix.
    labels: A `Tensor` whose shape matches 'predictions'.
dim: The dimension along which the cosine distance is computed.
    weights: Coefficients for the loss: a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`weights` is `None`.
"""
if dim is None:
raise ValueError("`dim` cannot be None.")
with ops.name_scope(scope, "cosine_distance_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,])
return compute_weighted_loss(losses, weights, scope=scope)
|
spatialaudio/sweep
|
refs/heads/master
|
lin_sweep_kaiser_window_script2/lin_sweep_kaiser_window_script2.py
|
2
|
#!/usr/bin/env python3
"""The influence of windowing of lin sweep signals when using a
Kaiser Window by fixing beta (=7) and fade_in (=0).
fstart = 1 Hz
fstop = 22050 Hz
Deconvolution: Windowed
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter, fftconvolve
import numpy as np
# Parameters of the measuring system
fs = 44100
fstart = 1
fstop = 22050
duration = 1
pad = 4
# Generate excitation signal
excitation = generation.lin_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -30.
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])
# Combine system elements
system = measurement_chain.chained(dirac_system, noise)
# Lists
beta = 7
fade_in = 0
fade_out_list = np.arange(0, 1001, 1)
# Spectrum of dirac for reference
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
def get_results(fade_out):
excitation_windowed = excitation * windows.window_kaiser(N,
fade_in,
fade_out,
fs, beta)
excitation_windowed_zeropadded = generation.zero_padding(
excitation_windowed, pad, fs)
system_response = system(excitation_windowed_zeropadded)
ir = calculation.deconv_process(excitation_windowed_zeropadded,
system_response,
fs)
return ir
with open("lin_sweep_kaiser_window_script2.txt", "w") as f:
for fade_out in fade_out_list:
ir = get_results(fade_out)
pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
spectrum_distance = calculation.vector_distance(
dirac_f, np.fft.rfft(ir[:pad * fs]))
f.write(
str(fade_out) + " " + str(pnr)
+ " " + str(spectrum_distance) + " \n")
|
AKSW/LODStats
|
refs/heads/master
|
lodstats/stats/Entities.py
|
2
|
"""
Copyright 2013 AKSW Research Group http://aksw.org
This file is part of LODStats.
LODStats is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
LODStats is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with LODStats. If not, see <http://www.gnu.org/licenses/>.
"""
import lodstats.util.rdf_namespaces
from .RDFStatInterface import RDFStatInterface
import RDF
class Entities(RDFStatInterface):
"""
    Number of distinct entities
    Entity - a triple whose subject ?s is an IRI (not blank)
"""
def __init__(self, results):
super(Entities, self).__init__(results)
#self.entities = []
self.c = 0
def count(self, s, p, o, s_blank, o_l, o_blank, statement):
if statement.object.is_resource() and\
statement.subject.is_resource() and\
statement.predicate.is_resource():
#self.entities.append( (s,p,o) )
self.c += 1
def postproc(self):
#Entities mentioned
self.results['count'] = self.c
#Distinct entities
#self.results['triples'] = self.triples
def voidify(self, void_model, dataset):
namespaces = lodstats.util.rdf_namespaces.RDFNamespaces()
datatype_uri = namespaces.get_rdf_namespace("xsd").integer.uri
number_of_distinct_entities = str(self.results['count'])
number_of_entities_node = RDF.Node(literal=number_of_distinct_entities,
datatype=datatype_uri)
void_model.append(RDF.Statement(dataset,
namespaces.get_rdf_namespace("void").entities,
number_of_entities_node))
def sparql(self, endpoint):
pass
|
MiiBond/three.js
|
refs/heads/master
|
utils/exporters/fbx/modules/win/Python26_x86/FbxCommon.py
|
21
|
from fbx import *
import sys
def InitializeSdkObjects():
# The first thing to do is to create the FBX SDK manager which is the
# object allocator for almost all the classes in the SDK.
lSdkManager = KFbxSdkManager.Create()
if not lSdkManager:
sys.exit(0)
# Create an IOSettings object
ios = KFbxIOSettings.Create(lSdkManager, IOSROOT)
lSdkManager.SetIOSettings(ios)
# Create the entity that will hold the scene.
lScene = KFbxScene.Create(lSdkManager, "")
return (lSdkManager, lScene)
def SaveScene(pSdkManager, pScene, pFilename, pFileFormat = -1, pEmbedMedia = False):
lExporter = KFbxExporter.Create(pSdkManager, "")
if pFileFormat < 0 or pFileFormat >= pSdkManager.GetIOPluginRegistry().GetWriteFormatCount():
pFileFormat = pSdkManager.GetIOPluginRegistry().GetNativeWriterFormat()
if not pEmbedMedia:
lFormatCount = pSdkManager.GetIOPluginRegistry().GetWriterFormatCount()
for lFormatIndex in range(lFormatCount):
if pSdkManager.GetIOPluginRegistry().WriterIsFBX(lFormatIndex):
lDesc = KString(pSdkManager.GetIOPluginRegistry().GetWriterFormatDescription(lFormatIndex))
if lDesc.Find("ascii") >= 0:
pFileFormat = lFormatIndex
break
if not pSdkManager.GetIOSettings():
ios = KFbxIOSettings.Create(pSdkManager, IOSROOT)
pSdkManager.SetIOSettings(ios)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_MATERIAL, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_TEXTURE, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_EMBEDDED, pEmbedMedia)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_SHAPE, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_GOBO, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_ANIMATION, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_GLOBAL_SETTINGS, True)
if lExporter.Initialize(pFilename, pFileFormat, pSdkManager.GetIOSettings()):
lExporter.Export(pScene)
lExporter.Destroy()
def LoadScene(pSdkManager, pScene, pFileName):
lImporter = KFbxImporter.Create(pSdkManager, "")
result = lImporter.Initialize(pFileName, -1, pSdkManager.GetIOSettings())
if not result:
return False
if lImporter.IsFBX():
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_MATERIAL, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_TEXTURE, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_EMBEDDED, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_SHAPE, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_GOBO, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_ANIMATION, True)
pSdkManager.GetIOSettings().SetBoolProp(EXP_FBX_GLOBAL_SETTINGS, True)
result = lImporter.Import(pScene)
lImporter.Destroy()
return result
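# Hedged usage sketch (comments only, not part of the original helper);
# 'in.fbx' and 'out.fbx' are hypothetical paths:
#
#   lSdkManager, lScene = InitializeSdkObjects()
#   if LoadScene(lSdkManager, lScene, 'in.fbx'):
#       SaveScene(lSdkManager, lScene, 'out.fbx')  # native writer, no media
#   lSdkManager.Destroy()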
|
OmarIthawi/edx-platform
|
refs/heads/master
|
common/djangoapps/student/migrations/0019_create_approved_demographic_fields_fall_2012.py
|
188
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'UserProfile.occupation'
db.delete_column('auth_userprofile', 'occupation')
# Deleting field 'UserProfile.telephone_number'
db.delete_column('auth_userprofile', 'telephone_number')
# Deleting field 'UserProfile.date_of_birth'
db.delete_column('auth_userprofile', 'date_of_birth')
# Deleting field 'UserProfile.country'
db.delete_column('auth_userprofile', 'country')
# Adding field 'UserProfile.year_of_birth'
db.add_column('auth_userprofile', 'year_of_birth',
self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.level_of_education'
db.add_column('auth_userprofile', 'level_of_education',
self.gf('django.db.models.fields.CharField')(db_index=True, max_length=6, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.goals'
db.add_column('auth_userprofile', 'goals',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding index on 'UserProfile', fields ['gender']
db.create_index('auth_userprofile', ['gender'])
def backwards(self, orm):
# Removing index on 'UserProfile', fields ['gender']
db.delete_index('auth_userprofile', ['gender'])
# Adding field 'UserProfile.occupation'
db.add_column('auth_userprofile', 'occupation',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.telephone_number'
db.add_column('auth_userprofile', 'telephone_number',
self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.date_of_birth'
db.add_column('auth_userprofile', 'date_of_birth',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.country'
db.add_column('auth_userprofile', 'country',
self.gf('django_countries.fields.CountryField')(max_length=2, null=True, blank=True),
keep_default=False)
# Deleting field 'UserProfile.year_of_birth'
db.delete_column('auth_userprofile', 'year_of_birth')
# Deleting field 'UserProfile.level_of_education'
db.delete_column('auth_userprofile', 'level_of_education')
# Deleting field 'UserProfile.goals'
db.delete_column('auth_userprofile', 'goals')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
xiandiancloud/edxplaltfom-xusong
|
refs/heads/master
|
common/djangoapps/student/tests/test_authz.py
|
61
|
"""
Tests authz.py
"""
import mock
from django.test import TestCase
from django.contrib.auth.models import User, AnonymousUser
from django.core.exceptions import PermissionDenied
from student.roles import CourseInstructorRole, CourseStaffRole, CourseCreatorRole
from student.tests.factories import AdminFactory
from student.auth import has_access, add_users, remove_users
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class CreatorGroupTest(TestCase):
"""
Tests for the course creator group.
"""
def setUp(self):
""" Test case setup """
self.user = User.objects.create_user('testuser', 'test+courses@edx.org', 'foo')
self.admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
self.admin.is_staff = True
def test_creator_group_not_enabled(self):
"""
Tests that CourseCreatorRole().has_user always returns True if ENABLE_CREATOR_GROUP
and DISABLE_COURSE_CREATION are both not turned on.
"""
self.assertTrue(has_access(self.user, CourseCreatorRole()))
def test_creator_group_enabled_but_empty(self):
""" Tests creator group feature on, but group empty. """
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
self.assertFalse(has_access(self.user, CourseCreatorRole()))
# Make user staff. This will cause CourseCreatorRole().has_user to return True.
self.user.is_staff = True
self.assertTrue(has_access(self.user, CourseCreatorRole()))
def test_creator_group_enabled_nonempty(self):
""" Tests creator group feature on, user added. """
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
add_users(self.admin, CourseCreatorRole(), self.user)
self.assertTrue(has_access(self.user, CourseCreatorRole()))
# check that a user who has not been added to the group still returns false
user_not_added = User.objects.create_user('testuser2', 'test+courses2@edx.org', 'foo2')
self.assertFalse(has_access(user_not_added, CourseCreatorRole()))
# remove first user from the group and verify that CourseCreatorRole().has_user now returns false
remove_users(self.admin, CourseCreatorRole(), self.user)
self.assertFalse(has_access(self.user, CourseCreatorRole()))
def test_course_creation_disabled(self):
""" Tests that the COURSE_CREATION_DISABLED flag overrides course creator group settings. """
with mock.patch.dict('django.conf.settings.FEATURES',
{'DISABLE_COURSE_CREATION': True, "ENABLE_CREATOR_GROUP": True}):
# Add user to creator group.
add_users(self.admin, CourseCreatorRole(), self.user)
# DISABLE_COURSE_CREATION overrides (user is not marked as staff).
self.assertFalse(has_access(self.user, CourseCreatorRole()))
# Mark as staff. Now CourseCreatorRole().has_user returns true.
self.user.is_staff = True
self.assertTrue(has_access(self.user, CourseCreatorRole()))
# Remove user from creator group. CourseCreatorRole().has_user still returns true because is_staff=True
remove_users(self.admin, CourseCreatorRole(), self.user)
self.assertTrue(has_access(self.user, CourseCreatorRole()))
def test_add_user_not_authenticated(self):
"""
Tests that adding to creator group fails if user is not authenticated
"""
with mock.patch.dict(
'django.conf.settings.FEATURES',
{'DISABLE_COURSE_CREATION': False, "ENABLE_CREATOR_GROUP": True}
):
anonymous_user = AnonymousUser()
role = CourseCreatorRole()
add_users(self.admin, role, anonymous_user)
self.assertFalse(has_access(anonymous_user, role))
def test_add_user_not_active(self):
"""
Tests that adding to creator group fails if user is not active
"""
with mock.patch.dict(
'django.conf.settings.FEATURES',
{'DISABLE_COURSE_CREATION': False, "ENABLE_CREATOR_GROUP": True}
):
self.user.is_active = False
add_users(self.admin, CourseCreatorRole(), self.user)
self.assertFalse(has_access(self.user, CourseCreatorRole()))
def test_add_user_to_group_requires_staff_access(self):
with self.assertRaises(PermissionDenied):
self.admin.is_staff = False
add_users(self.admin, CourseCreatorRole(), self.user)
with self.assertRaises(PermissionDenied):
add_users(self.user, CourseCreatorRole(), self.user)
def test_add_user_to_group_requires_active(self):
with self.assertRaises(PermissionDenied):
self.admin.is_active = False
add_users(self.admin, CourseCreatorRole(), self.user)
def test_add_user_to_group_requires_authenticated(self):
with self.assertRaises(PermissionDenied):
self.admin.is_authenticated = mock.Mock(return_value=False)
add_users(self.admin, CourseCreatorRole(), self.user)
def test_remove_user_from_group_requires_staff_access(self):
with self.assertRaises(PermissionDenied):
self.admin.is_staff = False
remove_users(self.admin, CourseCreatorRole(), self.user)
def test_remove_user_from_group_requires_active(self):
with self.assertRaises(PermissionDenied):
self.admin.is_active = False
remove_users(self.admin, CourseCreatorRole(), self.user)
def test_remove_user_from_group_requires_authenticated(self):
with self.assertRaises(PermissionDenied):
self.admin.is_authenticated = mock.Mock(return_value=False)
remove_users(self.admin, CourseCreatorRole(), self.user)
class CourseGroupTest(TestCase):
"""
Tests for instructor and staff groups for a particular course.
"""
def setUp(self):
""" Test case setup """
self.global_admin = AdminFactory()
self.creator = User.objects.create_user('testcreator', 'testcreator+courses@edx.org', 'foo')
self.staff = User.objects.create_user('teststaff', 'teststaff+courses@edx.org', 'foo')
self.course_key = SlashSeparatedCourseKey('mitX', '101', 'test')
def test_add_user_to_course_group(self):
"""
Tests adding user to course group (happy path).
"""
# Create groups for a new course (and assign instructor role to the creator).
self.assertFalse(has_access(self.creator, CourseInstructorRole(self.course_key)))
add_users(self.global_admin, CourseInstructorRole(self.course_key), self.creator)
add_users(self.global_admin, CourseStaffRole(self.course_key), self.creator)
self.assertTrue(has_access(self.creator, CourseInstructorRole(self.course_key)))
# Add another user to the staff role.
self.assertFalse(has_access(self.staff, CourseStaffRole(self.course_key)))
add_users(self.creator, CourseStaffRole(self.course_key), self.staff)
self.assertTrue(has_access(self.staff, CourseStaffRole(self.course_key)))
def test_add_user_to_course_group_permission_denied(self):
"""
Verifies PermissionDenied if caller of add_user_to_course_group is not instructor role.
"""
add_users(self.global_admin, CourseInstructorRole(self.course_key), self.creator)
add_users(self.global_admin, CourseStaffRole(self.course_key), self.creator)
with self.assertRaises(PermissionDenied):
add_users(self.staff, CourseStaffRole(self.course_key), self.staff)
def test_remove_user_from_course_group(self):
"""
Tests removing user from course group (happy path).
"""
add_users(self.global_admin, CourseInstructorRole(self.course_key), self.creator)
add_users(self.global_admin, CourseStaffRole(self.course_key), self.creator)
add_users(self.creator, CourseStaffRole(self.course_key), self.staff)
self.assertTrue(has_access(self.staff, CourseStaffRole(self.course_key)))
remove_users(self.creator, CourseStaffRole(self.course_key), self.staff)
self.assertFalse(has_access(self.staff, CourseStaffRole(self.course_key)))
remove_users(self.creator, CourseInstructorRole(self.course_key), self.creator)
self.assertFalse(has_access(self.creator, CourseInstructorRole(self.course_key)))
def test_remove_user_from_course_group_permission_denied(self):
"""
Verifies PermissionDenied if caller of remove_user_from_course_group is not instructor role.
"""
add_users(self.global_admin, CourseInstructorRole(self.course_key), self.creator)
another_staff = User.objects.create_user('another', 'teststaff+anothercourses@edx.org', 'foo')
add_users(self.global_admin, CourseStaffRole(self.course_key), self.creator, self.staff, another_staff)
with self.assertRaises(PermissionDenied):
remove_users(self.staff, CourseStaffRole(self.course_key), another_staff)
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/addImport/localFromImport.after.py
|
76
|
def func():
    for _ in range(10):
from package.module import foo
foo
# <ref>
|
AndKe/MAVProxy
|
refs/heads/master
|
MAVProxy/modules/mavproxy_checklist.py
|
5
|
#!/usr/bin/env python
'''
Checklist module
Stephen Dade
July 2014
'''
import math, sys, os, time
from MAVProxy.modules.lib import mp_checklist
from MAVProxy.modules.lib import mp_module
from pymavlink import mavutil
class ChecklistModule(mp_module.MPModule):
def __init__(self, mpstate):
super(ChecklistModule, self).__init__(mpstate, "checklist", "checklist module")
self.checklist = mp_checklist.CheckUI()
def mavlink_packet(self, msg):
'''handle an incoming mavlink packet'''
if not isinstance(self.checklist, mp_checklist.CheckUI):
return
if not self.checklist.is_alive():
return
type = msg.get_type()
master = self.master
if type == 'HEARTBEAT':
'''beforeEngineList - APM booted'''
if self.mpstate.status.heartbeat_error == True:
self.checklist.set_check("Pixhawk Booted", 0)
else:
self.checklist.set_check("Pixhawk Booted", 1)
'''beforeEngineList - Flight mode MANUAL'''
if self.mpstate.status.flightmode == "MANUAL":
self.checklist.set_check("Flight mode MANUAL", 1)
else:
self.checklist.set_check("Flight mode MANUAL", 0)
if type in [ 'GPS_RAW', 'GPS_RAW_INT' ]:
'''beforeEngineList - GPS lock'''
if ((msg.fix_type >= 3 and master.mavlink10()) or
(msg.fix_type == 2 and not master.mavlink10())):
self.checklist.set_check("GPS lock", 1)
else:
self.checklist.set_check("GPS lock", 0)
'''beforeEngineList - Radio Links > 6db margin TODO: figure out how to read db levels'''
if type in ['RADIO', 'RADIO_STATUS']:
            if msg.rssi < msg.noise+6 or msg.remrssi < msg.remnoise+6:
                self.checklist.set_check("Radio Links > 6db margin", 0)
            else:
                self.checklist.set_check("Radio Links > 6db margin", 1)
if type == 'HWSTATUS':
'''beforeEngineList - Avionics Battery'''
if msg.Vcc >= 4600 and msg.Vcc <= 5300:
self.checklist.set_check("Avionics Power", 1)
else:
self.checklist.set_check("Avionics Power", 0)
if type == 'POWER_STATUS':
'''beforeEngineList - Servo Power'''
if msg.Vservo >= 4900 and msg.Vservo <= 6500:
self.checklist.set_check("Servo Power", 1)
else:
self.checklist.set_check("Servo Power", 0)
'''beforeEngineList - Waypoints Loaded'''
if type == 'HEARTBEAT':
if self.module('wp').wploader.count() == 0:
self.checklist.set_check("Waypoints Loaded", 0)
else:
self.checklist.set_check("Waypoints Loaded", 1)
'''beforeTakeoffList - Compass active'''
if type == 'GPS_RAW':
if math.fabs(msg.hdg - master.field('VFR_HUD', 'heading', '-')) < 10 or math.fabs(msg.hdg - master.field('VFR_HUD', 'heading', '-')) > 355:
self.checklist.set_check("Compass active", 1)
else:
self.checklist.set_check("Compass active", 0)
'''beforeCruiseList - Airspeed > 10 m/s , Altitude > 30 m'''
if type == 'VFR_HUD':
rel_alt = master.field('GLOBAL_POSITION_INT', 'relative_alt', 0) * 1.0e-3
if rel_alt > 30:
self.checklist.set_check("Altitude > 30 m", 1)
else:
self.checklist.set_check("Altitude > 30 m", 0)
if msg.airspeed > 10 or msg.groundspeed > 10:
self.checklist.set_check("Airspeed > 10 m/s", 1)
else:
self.checklist.set_check("Airspeed > 10 m/s", 0)
'''beforeEngineList - IMU'''
if type in ['SYS_STATUS']:
sensors = { 'AS' : mavutil.mavlink.MAV_SYS_STATUS_SENSOR_DIFFERENTIAL_PRESSURE,
'MAG' : mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_MAG,
'INS' : mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_ACCEL | mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_GYRO,
'AHRS' : mavutil.mavlink.MAV_SYS_STATUS_AHRS}
bits = sensors['INS']
present = ((msg.onboard_control_sensors_enabled & bits) == bits)
healthy = ((msg.onboard_control_sensors_health & bits) == bits)
            if not present or not healthy:
                self.checklist.set_check("IMU Check", 0)
            else:
                self.checklist.set_check("IMU Check", 1)
def init(mpstate):
'''initialise module'''
return ChecklistModule(mpstate)
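# Usage note (editor's sketch, not part of the original module): once this
# file is on MAVProxy's module path, it can be loaded from the MAVProxy
# console with the standard module command, after which the checklist window
# is updated by the mavlink_packet() handler above:
#
#   MAV> module load checklist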
|
lumig242/Hue-Integration-with-CDAP
|
refs/heads/pull3
|
desktop/core/ext-py/boto-2.38.0/boto/manage/test_manage.py
|
153
|
from boto.manage.server import Server
from boto.manage.volume import Volume
import time
print('--> Creating New Volume')
volume = Volume.create()
print(volume)
print('--> Creating New Server')
server_list = Server.create()
server = server_list[0]
print(server)
print('----> Waiting for Server to start up')
while server.status != 'running':
print('*')
time.sleep(10)
print('----> Server is running')
print('--> Run "df -k" on Server')
status = server.run('df -k')
print(status[1])
print('--> Now run volume.make_ready to make the volume ready to use on server')
volume.make_ready(server)
print('--> Run "df -k" on Server')
status = server.run('df -k')
print(status[1])
print('--> Do an "ls -al" on the new filesystem')
status = server.run('ls -al %s' % volume.mount_point)
print(status[1])
|
Kast0rTr0y/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_snmp_community.py
|
11
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_snmp_community
version_added: "2.2"
short_description: Manages SNMP community configs.
description:
- Manages SNMP community configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
options:
community:
description:
- Case-sensitive community string.
required: true
access:
description:
- Access type for community.
required: false
default: null
choices: ['ro','rw']
group:
description:
- Group to which the community belongs.
required: false
default: null
acl:
description:
- ACL name to filter snmp requests.
required: false
default: 1
state:
description:
- Manage the state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp community is configured
- nxos_snmp_community:
community: TESTING7
group: network-operator
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"group": "network-operator"}
existing:
description: k/v pairs of existing snmp community
type: dict
sample: {}
end_state:
description: k/v pairs of snmp community after module execution
returned: always
type: dict or null
sample: {"acl": "None", "group": "network-operator"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server community TESTING7 group network-operator"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_groups(module):
command = 'show snmp group'
data = execute_show_command(command, module)[0]
group_list = []
try:
group_table = data['TABLE_role']['ROW_role']
for group in group_table:
group_list.append(group['role_name'])
except (KeyError, AttributeError):
return group_list
return group_list
def get_snmp_community(module, find_filter=None):
command = 'show snmp community'
data = execute_show_command(command, module)[0]
community_dict = {}
community_map = {
'grouporaccess': 'group',
'aclfilter': 'acl'
}
try:
community_table = data['TABLE_snmp_community']['ROW_snmp_community']
for each in community_table:
community = apply_key_map(community_map, each)
key = each['community_name']
community_dict[key] = community
except (KeyError, AttributeError):
return community_dict
if find_filter:
find = community_dict.get(find_filter, None)
if find_filter is None or find is None:
return {}
else:
fix_find = {}
for (key, value) in find.items():
if isinstance(value, str):
fix_find[key] = value.strip()
else:
fix_find[key] = value
return fix_find
def config_snmp_community(delta, community):
CMDS = {
'group': 'snmp-server community {0} group {group}',
'acl': 'snmp-server community {0} use-acl {acl}'
}
commands = []
for k, v in delta.items():
cmd = CMDS.get(k).format(community, **delta)
if cmd:
commands.append(cmd)
cmd = None
return commands
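# Editor's note (illustrative sketch, not part of the original module): given
# the CMDS templates above, a delta of {'group': 'network-operator'} for
# community 'TESTING7' yields exactly the command shown in the RETURN sample:
#
#   config_snmp_community({'group': 'network-operator'}, 'TESTING7')
#   # -> ['snmp-server community TESTING7 group network-operator']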
def main():
argument_spec = dict(
community=dict(required=True, type='str'),
access=dict(choices=['ro', 'rw']),
group=dict(type='str'),
acl=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[['access', 'group']],
mutually_exclusive=[['access', 'group']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
access = module.params['access']
group = module.params['group']
community = module.params['community']
acl = module.params['acl']
state = module.params['state']
if access:
if access == 'ro':
group = 'network-operator'
elif access == 'rw':
group = 'network-admin'
# group check - ensure group being configured exists on the device
configured_groups = get_snmp_groups(module)
if group not in configured_groups:
        module.fail_json(msg="group not on switch. "
                             "Please add before moving forward.")
existing = get_snmp_community(module, community)
args = dict(group=group, acl=acl)
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
changed = False
end_state = existing
    commands = []
    cmds = []  # ensure cmds is defined even when no changes are needed
if state == 'absent':
if existing:
command = "no snmp-server community {0}".format(community)
commands.append(command)
cmds = flatten_list(commands)
elif state == 'present':
if delta:
command = config_snmp_community(dict(delta), community)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_snmp_community(module, community)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
|
cambid/clcms
|
refs/heads/master
|
bin/clcms_create_galleries.py
|
1
|
#!/usr/bin/env python
#
# This script will create clcms compatible gallery pages for the current directory
#
# The images in the current directory must be of the form:
# <nr>.<title>.<extension>
#
# Prerequisites: ImageMagick (for the convert tool)
# TODO: implement this with pixbuf?
#
# The default input directory is galleries,
# Every subdir of that should be a directory with images
# the output will be put in out/
#
# for now, most values are default only, command line options are in the TODO
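# Example layout (editor's sketch, names are illustrative):
#   galleries/Holiday/01.Beach sunset.jpg
# produces, given the code below:
#   out/Holiday/01.Beach sunset.jpg          (copied image)
#   out/Holiday/01.Beach sunset_small.jpg    (64x64 thumbnail via convert)
#   out/Holiday/gallery.page                 (clcms page linking the images)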
import os
import sys
from os.path import join, getsize
import stat
import shutil
import re
import commands
in_dir = "galleries"
out_dir = "out"
root_dir = os.getcwd()
thumbnail_size = "64x64"
image_file_p = re.compile("([0-9]+)\.(.*)\.([a-zA-Z]+)")
if in_dir[0] != "/":
in_dir = root_dir + os.sep + in_dir
if out_dir[0] != "/":
out_dir = root_dir + os.sep + out_dir
if not os.path.isdir(in_dir):
print "No such directory: " + in_dir
sys.exit(1)
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
os.chdir(in_dir)
gallery_names = []
dir_files = os.listdir(".")
top_page_lines = []
cur_gallery_lines = []
for d in dir_files:
if d != "." and d != ".." and os.path.isdir(d):
print "Gallery: " + d
cur_output_dir = out_dir + os.sep + d
if not os.path.isdir(cur_output_dir):
os.mkdir(cur_output_dir)
os.chdir(d)
image_files = os.listdir(".")
cur_gallery_lines = []
for i in image_files:
# TODO copy every .page to out/<Gallery>
image_file_m = image_file_p.match(i)
if image_file_m:
# Copy image, and create thumbnail
# escape spaces:
output_file = cur_output_dir + os.sep + i
cur_file_name = output_file.replace(" ", "\\ ")
cur_file_name = cur_file_name.replace("&", "\\&")
thumbnail_file = image_file_m.group(1) + "." + image_file_m.group(2) + "_small." + image_file_m.group(3)
thumbnail_abs = cur_output_dir + os.sep + thumbnail_file
cur_thumbnail_name = thumbnail_abs.replace(" ", "\\ ")
cur_thumbnail_name = cur_thumbnail_name.replace("&", "\\&")
shutil.copy2(i, cur_output_dir)
(result, output) = commands.getstatusoutput("convert -scale " + thumbnail_size + " " + cur_file_name + " " + cur_thumbnail_name)
if result != 0:
print "convert -scale " + thumbnail_size + " " + output_file + " " + thumbnail_abs
print "Error: "+str(result)
print "Output: "+output
print ""
# cur_gallery_lines.append("[[Image:"+thumbnail_file+"|"+image_file_m.group(2)+"]")
cur_gallery_lines.append("[[" + i + "][[[Image:"+thumbnail_file+"|"+image_file_m.group(2)+"]]]]")
o_file = open(cur_output_dir + os.sep + "gallery.page", "w")
for l in cur_gallery_lines:
o_file.write(l + "\n")
        o_file.close()
|
CoderDojoLX/appinventor-sources
|
refs/heads/master
|
appinventor/rendezvous/bottle.py
|
158
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2011, Marcel Hellkamp.
License: MIT (see LICENSE.txt for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.10.6'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import sys
import base64
import cgi
import email.utils
import functools
import hmac
import httplib
import imp
import itertools
import mimetypes
import os
import re
import subprocess
import tempfile
import thread
import threading
import time
import warnings
from Cookie import SimpleCookie
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from urlparse import urljoin, SplitResult as UrlSplitResult
# Workaround for a bug in some versions of lib2to3 (fixed on CPython 2.7 and 3.2)
import urllib
urlencode = urllib.urlencode
urlquote = urllib.quote
urlunquote = urllib.unquote
try: from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover
from UserDict import DictMixin
try: from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
try: import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try: from json import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
py3k = sys.version_info >= (3,0,0)
NCTextIOWrapper = None
if sys.version_info < (2,6,0):
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
if py3k: # pragma: no cover
json_loads = lambda s: json_lds(touni(s))
# See Request.POST
from io import BytesIO
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return str(x, enc, err) if isinstance(x, bytes) else str(x)
if sys.version_info < (3,2,0):
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
''' Garbage collecting an io.TextIOWrapper(buffer) instance closes
the wrapped buffer. This subclass keeps it open. '''
def close(self): pass
else:
json_loads = json_lds
from StringIO import StringIO as BytesIO
bytes = str
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return x if isinstance(x, unicode) else unicode(str(x), enc, err)
def tob(data, enc='utf8'):
""" Convert anything to bytes """
return data.encode(enc) if isinstance(data, unicode) else bytes(data)
tonat = touni if py3k else tob
tonat.__doc__ = """ Convert anything to native strings """
def try_update_wrapper(wrapper, wrapped, *a, **ka):
    try: # Bug: functools breaks if wrapper is an instance method
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
# Backward compatibility
def depr(message):
warnings.warn(message, DeprecationWarning, stacklevel=3)
# Small helpers
def makelist(data):
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
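# Editor's sketch (hypothetical class, not part of bottle itself): DictProperty
# caches the getter's result in a dict-valued attribute, which is how the
# request properties further below memoize into the WSGI environ:
#
#   class Example(object):
#       def __init__(self):
#           self.storage = {}
#       @DictProperty('storage', 'answer', read_only=True)
#       def answer(self):
#           return 42   # computed once, then served from self.storage['answer']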
class CachedProperty(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
cached_property = CachedProperty
class lazy_attribute(object): # Does not need configuration -> lower-case name
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
#TODO: These should subclass BaseRequest
class HTTPResponse(BottleException):
""" Used to break execution and immediately finish the response """
def __init__(self, output='', status=200, header=None):
super(BottleException, self).__init__("HTTP Response %d" % status)
self.status = int(status)
self.output = output
self.headers = HeaderDict(header) if header else None
def apply(self, response):
if self.headers:
for key, value in self.headers.iterallitems():
response.headers[key] = value
response.status = self.status
class HTTPError(HTTPResponse):
""" Used to generate an error page """
def __init__(self, code=500, output='Unknown Error', exception=None,
traceback=None, header=None):
super(HTTPError, self).__init__(output, code, header)
self.exception = exception
self.traceback = traceback
def __repr__(self):
return tonat(template(ERROR_PAGE_TEMPLATE, e=self))
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router """
class RouteBuildError(RouteError):
""" The route could not been built """
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
default_pattern = '[^/]+'
default_filter = 're'
#: Sorry for the mess. It works. Trust me.
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def __init__(self, strict=False):
self.rules = {} # A {rule: Rule} mapping
self.builder = {} # A rule/name->build_info mapping
self.static = {} # Cache for static routes: {path: {method: target}}
self.dynamic = [] # Cache for dynamic routes. See _compile()
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {'re': self.re_filter, 'int': self.int_filter,
'float': self.float_filter, 'path': self.path_filter}
def re_filter(self, conf):
return conf or self.default_pattern, None, None
def int_filter(self, conf):
return r'-?\d+', int, lambda x: str(int(x))
def float_filter(self, conf):
return r'-?[\d.]+', float, lambda x: str(float(x))
def path_filter(self, conf):
return r'.*?', None, None
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
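    # Editor's sketch (hypothetical filter, not shipped with bottle): a custom
    # wildcard filter follows the (regexp, to_python, to_url) contract above;
    # e.g. router.add_filter('ilist', ilist_filter) enables '/sum/<nums:ilist>':
    #
    #   def ilist_filter(conf):
    #       regexp = r'-?\d+(?:,-?\d+)*'
    #       to_python = lambda s: [int(x) for x in s.split(',')]
    #       to_url = lambda lst: ','.join(str(x) for x in lst)
    #       return regexp, to_python, to_url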
def parse_rule(self, rule):
''' Parses a rule into a (name, filter, conf) token stream. If mode is
None, name contains a static rule part. '''
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix: yield prefix, None, None
name, filtr, conf = g[1:4] if not g[2] is None else g[4:7]
if not filtr: filtr = self.default_filter
yield name, filtr, conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
''' Add a new route or replace the target for an existing route. '''
if rule in self.rules:
self.rules[rule][method] = target
if name: self.builder[name] = self.builder[rule]
return
target = self.rules[rule] = {method: target}
# Build pattern and other structures for dynamic routes
anons = 0 # Number of anonymous wildcards
pattern = '' # Regular expression pattern
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self.parse_rule(rule):
if mode:
is_static = False
mask, in_filter, out_filter = self.filters[mode](conf)
if key:
pattern += '(?P<%s>%s)' % (key, mask)
else:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons; anons += 1
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static[self.build(rule)] = target
return
def fpat_sub(m):
return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern)
try:
re_match = re.compile('^(%s)$' % pattern).match
except re.error, e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
def match(path):
""" Return an url-argument dictionary. """
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
try:
combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern)
self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
self.dynamic[-1][1].append((match, target))
except (AssertionError, IndexError), e: # AssertionError: Too many groups
self.dynamic.append((re.compile('(^%s$)' % flat_pattern),
[(match, target)]))
return match
def build(self, _name, *anons, **query):
''' Build an URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError, e:
raise RouteBuildError('Missing URL argument: %r' % e.args[0])
def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
path, targets, urlargs = environ['PATH_INFO'] or '/', None, {}
if path in self.static:
targets = self.static[path]
else:
for combined, rules in self.dynamic:
match = combined.match(path)
if not match: continue
getargs, targets = rules[match.lastindex - 1]
urlargs = getargs(path) if getargs else {}
break
if not targets:
raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
method = environ['REQUEST_METHOD'].upper()
if method in targets:
return targets[method], urlargs
if method == 'HEAD' and 'GET' in targets:
return targets['GET'], urlargs
if 'ANY' in targets:
return targets['ANY'], urlargs
allowed = [verb for verb in targets if verb != 'ANY']
if 'GET' in allowed and 'HEAD' not in allowed:
allowed.append('HEAD')
raise HTTPError(405, "Method not allowed.",
header=[('Allow',",".join(allowed))])
class Route(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
    turning a URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict(config)
def __call__(self, *a, **ka):
depr("Some APIs changed to return Route() instances instead of"\
" callables. Make sure to use the Route.call method and not to"\
" call Route instances directly.")
return self.call(*a, **ka)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
@property
def _context(self):
depr('Switch to Plugin API v2 and access the Route object directly.')
return dict(rule=self.rule, method=self.method, callback=self.callback,
name=self.name, app=self.app, config=self.config,
apply=self.plugins, skip=self.skiplist)
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
api = getattr(plugin, 'api', 1)
context = self if api > 1 else self._context
callback = plugin.apply(callback, context)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
try_update_wrapper(callback, self.callback)
return callback
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" WSGI application """
def __init__(self, catchall=True, autojson=True, config=None):
""" Create a new bottle instance.
You usually don't do that. Use `bottle.app.push()` instead.
"""
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.plugins = [] # List of installed plugins.
self.error_handler = {}
        #: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.config = ConfigDict(config or {})
self.catchall = catchall
#: An instance of :class:`HooksPlugin`. Empty by default.
self.hooks = HooksPlugin()
self.install(self.hooks)
if autojson:
self.install(JSONPlugin())
self.install(TemplatePlugin())
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
prefix, app = app, prefix
depr('Parameter order of Bottle.mount() changed.') # 0.10
parts = filter(None, prefix.split('/'))
if not parts: raise ValueError('Empty path prefix.')
path_depth = len(parts)
options.setdefault('skip', True)
options.setdefault('method', 'ANY')
@self.route('/%s/:#.*#' % '/'.join(parts), **options)
def mountpoint():
try:
request.path_shift(path_depth)
rs = BaseResponse([], 200)
def start_response(status, header):
rs.status = status
for name, value in header: rs.add_header(name, value)
return rs.body.append
rs.body = itertools.chain(rs.body, app(request.environ, start_response))
return HTTPResponse(rs.body, rs.status, rs.headers)
finally:
request.path_shift(-path_depth)
if not prefix.endswith('/'):
self.route('/' + '/'.join(parts), callback=mountpoint, **options)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.hooks.trigger('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.routes.append(route)
self.router.add(rule, verb, route, name=name)
if DEBUG: route.prepare()
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
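    # Editor's sketch (usage illustration, not part of the source): registering
    # a handler for 404 responses; the handler receives the HTTPError instance:
    #
    #   @app.error(404)
    #   def not_found(error):
    #       return 'Nothing here, sorry.'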
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. """
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper
def handle(self, path, method='GET'):
""" (deprecated) Execute the first matching route callback and return
        the result. :exc:`HTTPResponse` exceptions are caught and returned.
        If :attr:`Bottle.catchall` is true, other exceptions are caught as
        well and returned as :exc:`HTTPError` instances (500).
"""
depr("This method will change semantics in 0.10. Try to avoid it.")
if isinstance(path, dict):
return self._handle(path)
return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def _handle(self, environ):
try:
route, args = self.router.match(environ)
environ['route.handle'] = environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse, r:
return r
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception, e:
if not self.catchall: raise
stacktrace = format_exc(10)
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", e, stacktrace)
def _cast(self, out, request, response, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status, repr)(out)
if isinstance(out, HTTPResponse):
depr('Error handlers must not return :exc:`HTTPResponse`.') #0.9
return self._cast(out, request, response)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output, request, response)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = out.next()
while not first:
first = out.next()
except StopIteration:
return self._cast('', request, response)
except HTTPResponse, e:
first = e
except Exception, e:
first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
or not self.catchall:
raise
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first, request, response)
if isinstance(first, bytes):
return itertools.chain([first], out)
if isinstance(first, unicode):
return itertools.imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'\
% type(first)), request, response)
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
out = self._cast(self._handle(environ), request, response)
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or request.method == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, list(response.iter_headers()))
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception, e:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(e)), html_escape(format_exc(10)))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers)
return [tob(err)]
def __call__(self, environ, start_response):
        ''' Each instance of :class:`Bottle` is a WSGI application. '''
return self.wsgi(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(DictMixin):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only."""
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = environ
environ['bottle.request'] = self
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
return FormsDict((c.key, c.value) for c in cookies.itervalues())
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
data = parse_qs(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = FormsDict()
for key, values in data.iteritems():
for value in values:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.iterallitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.iterallitems():
params[key] = value
for key, value in self.forms.iterallitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
filename
The filename, if specified; otherwise None; this is the client
side filename, *not* the file name on which it is stored (that's
a temporary file you don't deal with)
file
The file(-like) object from which you can read the data.
value
The value as a *string*; for file uploads, this transparently
reads the file every time you request the value. Do not do this
on big files.
"""
files = FormsDict()
for name, item in self.POST.iterallitems():
if hasattr(item, 'filename'):
files[name] = item
return files
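    # Editor's sketch (hypothetical handler; the field name 'data' is assumed):
    # saving an upload via the filename/file attributes documented above:
    #
    #   upload = request.files.get('data')
    #   if upload:
    #       with open('/tmp/' + upload.filename, 'wb') as fp:
    #           fp.write(upload.file.read())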
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
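    # Editor's sketch (worked example of the semantics above): with
    # SCRIPT_NAME='/app' and PATH_INFO='/admin/x', request.path_shift(1) moves
    # one segment, leaving SCRIPT_NAME='/app/admin' and PATH_INFO='/x';
    # a negative shift moves segments back from script_name to path.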
@property
def content_length(self):
        ''' The request body length as an integer. The client is responsible
            for setting this header. Otherwise, the real length of the body is
            unknown and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.reader, self.writer, self.default = name, reader, writer, default
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.headers.get(self.name)
return self.reader(value) if (value and self.reader) else (value or self.default)
def __set__(self, obj, value):
if self.writer: value = self.writer(value)
obj.headers[self.name] = value
def __delete__(self, obj):
if self.name in obj.headers:
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, **headers):
self._status_line = None
self._status_code = None
self.body = body
self._cookies = None
self._headers = {'Content-Type': [self.default_content_type]}
self.status = status or self.default_status
if headers:
for name, value in headers.items():
self[name] = value
def copy(self):
''' Returns a copy of self. '''
copy = Response()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = status or ('%d Unknown' % code)
def _get_status(self):
        depr('BaseResponse.status will change to return a string in 0.11. Use'\
             ' status_line and status_code to make sure.') #0.10
return self._status_code
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
        :data:`status_code` are updated accordingly. The return value is
always a numeric code. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
self.__dict__['headers'] = hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value, append=False):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
if append:
self.add_header(name, value)
else:
self._headers[_hkey(name)] = [str(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
headers = self._headers.iteritems()
bad_headers = self.bad_headers.get(self.status_code)
if bad_headers:
headers = [h for h in headers if h[0] not in bad_headers]
for name, values in headers:
for value in values:
yield name, value
if self._cookies:
for c in self._cookies.values():
yield 'Set-Cookie', c.OutputString()
def wsgiheader(self):
depr('The wsgiheader method is deprecated. See headerlist.') #0.10
return self.headerlist
@property
def headerlist(self):
''' WSGI conform list of (header, value) tuples. '''
return list(self.iter_headers())
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
@property
def charset(self):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return 'UTF-8'
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. This should not be used directly.
See :meth:`set_cookie`. """
depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
if not self._cookies:
self._cookies = SimpleCookie()
return self._cookies
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side javascript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.iteritems():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
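# Illustrative sketch (not part of the original source; the route and the
# secret below are made up): a signed cookie only decodes with the same key.
#
#     response.set_cookie('account', 'alice', secret='s3cret', max_age=3600)
#     # ...and in a later request handler:
#     request.get_cookie('account', secret='s3cret')  # -> 'alice'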
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
class LocalRequest(BaseRequest, threading.local):
''' A thread-local subclass of :class:`BaseRequest`. '''
def __init__(self): pass
bind = BaseRequest.__init__
class LocalResponse(BaseResponse, threading.local):
''' A thread-local subclass of :class:`BaseResponse`. '''
bind = BaseResponse.__init__
Response = LocalResponse # BC 0.9
Request = LocalRequest # BC 0.9
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, context):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization was successful
response.content_type = 'application/json'
return json_response
return rv
return wrapper
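# Illustrative sketch (hypothetical route, not part of the original source):
# with JSONPlugin installed, a handler that returns a dict is serialized to
# JSON and the Content-Type header is set automatically.
#
#     @route('/status')
#     def status():
#         return {'ok': True, 'version': __version__}  # sent as application/json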
class HooksPlugin(object):
name = 'hooks'
api = 2
_names = 'before_request', 'after_request', 'app_reset'
def __init__(self):
self.hooks = dict((name, []) for name in self._names)
self.app = None
def _empty(self):
return not (self.hooks['before_request'] or self.hooks['after_request'])
def setup(self, app):
self.app = app
def add(self, name, func):
''' Attach a callback to a hook. '''
was_empty = self._empty()
self.hooks.setdefault(name, []).append(func)
if self.app and was_empty and not self._empty(): self.app.reset()
def remove(self, name, func):
''' Remove a callback from a hook. '''
was_empty = self._empty()
if name in self.hooks and func in self.hooks[name]:
self.hooks[name].remove(func)
if self.app and not was_empty and self._empty(): self.app.reset()
def trigger(self, name, *a, **ka):
''' Trigger a hook and return a list of results. '''
hooks = self.hooks[name]
if ka.pop('reversed', False): hooks = hooks[::-1]
return [hook(*a, **ka) for hook in hooks]
def apply(self, callback, context):
if self._empty(): return callback
def wrapper(*a, **ka):
self.trigger('before_request')
rv = callback(*a, **ka)
self.trigger('after_request', reversed=True)
return rv
return wrapper
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str) and 'template_opts' in route.config:
depr('The `template_opts` parameter is deprecated.') #0.9
return view(conf, **route.config['template_opts'])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname, modname = fullname.rsplit('.', 1)
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
packname, modname = fullname.rsplit('.', 1)
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for k, v in dict(*a, **k).iteritems())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self): return ((k, v[-1]) for (k, v) in self.dict.iteritems())
def iterallitems(self):
for key, values in self.dict.iteritems():
for value in values:
yield key, value
# 2to3 is not able to fix these automatically.
keys = iterkeys if py3k else lambda self: list(self.iterkeys())
values = itervalues if py3k else lambda self: list(self.itervalues())
items = iteritems if py3k else lambda self: list(self.iteritems())
allitems = iterallitems if py3k else lambda self: list(self.iterallitems())
def get(self, key, default=None, index=-1, type=None):
''' Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
'''
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception, e:
pass
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
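# Illustrative sketch (not part of the original source): plain item access
# returns the newest value, while the full history stays available.
#
#     md = MultiDict(a=1)
#     md['a'] = 2                     # appends a second value
#     md['a']                         # -> 2
#     md.getall('a')                  # -> [1, 2]
#     md.get('a', type=str, index=0)  # -> '1'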
class FormsDict(MultiDict):
''' This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de- or
recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. '''
#: Encoding used for attribute values.
input_encoding = 'utf8'
def getunicode(self, name, default=None, encoding=None):
value, enc = self.get(name, default), encoding or self.input_encoding
try:
if isinstance(value, bytes): # Python 2 WSGI
return value.decode(enc)
elif isinstance(value, unicode): # Python 3 WSGI
return value.encode('latin1').decode(enc)
return value
except UnicodeError, e:
return default
def __getattr__(self, name): return self.getunicode(name, default=u'')
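# Illustrative sketch (not part of the original source): attribute access on a
# FormsDict decodes to unicode and never raises for missing keys.
#
#     form = FormsDict(name=b'Bob')
#     form.name     # -> u'Bob' (decoded using input_encoding)
#     form.missing  # -> u''    (missing attributes default to an empty string)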
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in map(_hkey, names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a 'HTTP_' prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
''' A dict-subclass with some extras: You can access keys like attributes.
Uppercase attributes create new ConfigDicts and act as name-spaces.
Other missing attributes return None. Calling a ConfigDict updates its
values and returns itself.
>>> cfg = ConfigDict()
>>> cfg.Namespace.value = 5
>>> cfg.OtherNamespace(a=1, b=2)
>>> cfg
{'Namespace': {'value': 5}, 'OtherNamespace': {'a': 1, 'b': 2}}
'''
def __getattr__(self, key):
if key not in self and key[0].isupper():
self[key] = ConfigDict()
return self.get(key)
def __setattr__(self, key, value):
if hasattr(dict, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], ConfigDict):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self: del self[key]
def __call__(self, *a, **ka):
for key, value in dict(*a, **ka).iteritems(): setattr(self, key, value)
return self
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
read, buff = self.fp.read, self.buffer_size
while True:
part = read(buff)
if not part: break
yield part
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if code is None:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
location = urljoin(request.url, url)
raise HTTPResponse("", status=code, header=dict(Location=location))
def static_file(filename, root, mimetype='auto', download=False):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 304, 403 or 404. Set Content-Type, Content-Encoding,
Content-Length and Last-Modified header. Obey If-Modified-Since header
and HEAD requests.
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
header = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if mimetype: header['Content-Type'] = mimetype
if encoding: header['Content-Encoding'] = encoding
elif mimetype:
header['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
header['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
header['Content-Length'] = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
header['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, header=header)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
return HTTPResponse(body, header=header)
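# Illustrative sketch (hypothetical path, not part of the original source):
# the root check above rejects '../' tricks because the joined path must
# still start with the absolute root directory.
#
#     @route('/static/<filename:path>')
#     def serve(filename):
#         return static_file(filename, root='/var/www/static')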
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
DEBUG = bool(mode)
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
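# Illustrative sketch (not part of the original source):
#
#     parse_date('Sun, 06 Nov 1994 08:49:37 GMT')  # -> 784111777.0 (UTC epoch)
#     parse_date('not a date')                     # -> None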
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
#TODO: Add 2to3 save base64[encode/decode] functions.
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
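# Illustrative sketch (not part of the original source):
#
#     parse_auth('Basic Ym9iOnNlY3JldA==')  # base64 of 'bob:secret'
#     # -> (u'bob', u'secret')
#     parse_auth('Digest nonce="..."')      # unsupported scheme -> None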
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by the length of the common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
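# Illustrative round trip (not part of the original source; the key is made up):
#
#     raw = cookie_encode(('name', 'value'), 's3cret')
#     cookie_is_encoded(raw)           # -> True
#     cookie_decode(raw, 's3cret')     # -> ('name', 'value')
#     cookie_decode(raw, 'wrong key')  # -> None (signature check fails)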
def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
.replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
''' Escape and quote a string to be used as an HTTP attribute.'''
return '"%s"' % html_escape(string).replace('\n','%#10;')\
.replace('\r',' ').replace('\t','	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/:x/:y'
c(x, y=5) -> '/c/:x' and '/c/:x/:y'
d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
"""
import inspect # Expensive module. Only import if necessary.
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = inspect.getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/:%s' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/:%s' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
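# Illustrative sketch (not part of the original source): a positive shift moves
# fragments from PATH_INFO to SCRIPT_NAME, a negative shift moves them back.
#
#     path_shift('/app', '/blog/post/1')      # -> ('/app/blog', '/post/1')
#     path_shift('/app/blog', '/post/1', -1)  # -> ('/app', '/blog/post/1')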
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
depr('Use route wildcard filters instead.')
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kargs):
for key, value in vkargs.iteritems():
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(*args, **kargs)
return wrapper
return decorator
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return HTTPError(401, text)
return func(*a, **ka)
return wrapper
return decorator
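# Illustrative sketch (hypothetical check function, not part of the original
# source): the decorator re-issues the challenge until check(user, pw) passes.
#
#     def check(user, password):
#         return user == 'admin' and password == 's3cret'
#
#     @route('/private')
#     @auth_basic(check)
#     def private_area():
#         return 'Hello, admin.'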
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
for name in '''route get post put delete error mount
hook install uninstall'''.split():
globals()[name] = make_default_app_wrapper(name)
url = make_default_app_wrapper('get_url')
del name
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **config):
self.options = config
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
try:
server.start()
finally:
server.stop()
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
if not self.quiet:
from paste.translogger import TransLogger
handler = TransLogger(handler)
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
print "WARNING: Auto-reloading does not work with Fapws3."
print " (Fapws3 breaks python thread support)"
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `monkey` (default: True) fixes the stdlib to use greenthreads.
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
"""
def run(self, handler):
from gevent import wsgi as wsgi_fast, pywsgi, monkey, local
if self.options.get('monkey', True):
if not threading.local is local.local: monkey.patch_all()
wsgi = wsgi_fast if self.options.get('fast') else pywsgi
wsgi.WSGIServer((self.host, self.port), handler).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
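# Illustrative sketch (not part of the original source): the three target
# forms of load() side by side.
#
#     load('os.path')                   # -> the os.path module
#     load('os.path:basename')          # -> the basename function
#     load('os.path:basename("/a/b")')  # -> 'b' (expression form)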
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporarily added default application
NORUN = nr_old
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
stderr = sys.stderr.write
try:
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
stderr("Bottle server starting up (using %s)...\n" % repr(server))
stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SyntaxError, ImportError):
if not reloader: raise
if not getattr(server, 'quiet', False): print_exc()
sys.exit(3)
finally:
if not getattr(server, 'quiet', False): stderr('Shutdown...\n')
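# Illustrative sketch (not part of the original source; the module path in the
# last line is made up):
#
#     run(host='0.0.0.0', port=8080)           # default app on wsgiref
#     run(app, server='cherrypy', quiet=True)  # explicit app and adapter
#     run('mymodule:app', reloader=True)       # load_app() target string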
class FileCheckerThread(threading.Thread):
''' Interrupt the main thread as soon as a changed module file is detected,
the lockfile is deleted, or it gets too old. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in sys.modules.values():
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in files.iteritems():
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = map(os.path.abspath, lookup)
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (*args)
or directly, as keywords (**kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if fname:
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
''' Untested! '''
def prepare(self, **options):
from simpletal import simpleTAL
# TODO: add option to load METAL files during render
if self.source:
self.tpl = simpleTAL.compileHTMLTemplate(self.source)
else:
with open(self.filename, 'rb') as fp:
self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))
def render(self, *args, **kwargs):
from simpletal import simpleTALES
for dictarg in args: kwargs.update(dictarg)
# TODO: maybe reuse a context instead of always creating one
context = simpleTALES.Context()
for k,v in self.defaults.items():
context.addGlobal(k, v)
for k,v in kwargs.items():
context.addGlobal(k, v)
output = StringIO()
self.tpl.expand(context, output)
return output.getvalue()
class SimpleTemplate(BaseTemplate):
blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
'with', 'def', 'class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
@lazy_attribute
def re_pytokens(cls):
''' This matches comments and all kinds of quoted strings, but does
NOT treat a '#' inside a quoted string as a comment. (trust me) '''
return re.compile(r'''
(''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
|'(?:[^\\']|\\.)+?' # Single quotes (')
|"(?:[^\\"]|\\.)+?" # Double quotes (")
|'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
|"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
|\#.* # Comments
)''', re.VERBOSE)
def prepare(self, escape_func=html_escape, noescape=False, **kwargs):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
if noescape:
self._str, self._escape = self._escape, self._str
@classmethod
def split_comment(cls, code):
""" Removes comments (#...) from python code. """
if '#' not in code: return code
#: Remove comments only (leave quoted strings as they are)
subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
return re.sub(cls.re_pytokens, subf, code)
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
stack = [] # Current Code indentation
lineno = 0 # Current line of code
ptrbuffer = [] # Buffer for printable strings and token tuple instances
codebuffer = [] # Buffer for generated python code
multiline = dedent = oneline = False
template = self.source or open(self.filename, 'rb').read()
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'): yield 'RAW', part[1:]
else: yield 'CMD', part
else: yield 'TXT', part
def flush(): # Flush the ptrbuffer
if not ptrbuffer: return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT': cline += repr(value)
elif token == 'RAW': cline += '_str(%s)' % value
elif token == 'CMD': cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
cline = '_printlist([' + cline + '])'
del ptrbuffer[:] # Do this before calling code() again
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = line if isinstance(line, unicode)\
else unicode(line, encoding=self.encoding)
if lineno <= 2:
m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
if m: self.encoding = m.group(1)
if m: line = line.replace('coding','coding (removed)')
if line.strip()[:2].count('%') == 1:
line = line.split('%',1)[1].lstrip() # Full line following the %
cline = self.split_comment(line).strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
flush() # You are actually reading this? Good luck, it's a mess :)
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks # "else:"
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':') # "if 1: pass"
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else: # Empty %include -> reverse of %rebase
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else: # Line starting with text (not '%') or '%%' (escaped)
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, kwargs)
def execute(self, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
env = self.defaults.copy()
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'_include': self.subtemplate, '_str': self._str,
'_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__})
env.update(kwargs)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
rargs['_base'] = _stdout[:] #copy stdout
del _stdout[:] # clear stdout
return self.subtemplate(subtpl,_stdout,rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
for dictarg in args: kwargs.update(dictarg)
stdout = []
self.execute(stdout, kwargs)
return ''.join(stdout)
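# Illustrative sketch (not part of the original source): {{...}} expressions
# are HTML-escaped by default; a '!' prefix yields the raw value.
#
#     SimpleTemplate('Hello {{name}}!').render(name='<World>')
#     # -> u'Hello &lt;World&gt;!'
#     SimpleTemplate('Hello {{!name}}!').render(name='<World>')
#     # -> u'Hello <World>!'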
def template(*args, **kwargs):
'''
Get a rendered template as a string.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings: TEMPLATES[tpl].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tpl].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like this:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator
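# Illustrative sketch (hypothetical template name, not part of the original
# source): a handler that returns a dict gets rendered with the named template;
# any other return value passes through unchanged.
#
#     @route('/hello/<name>')
#     @view('hello_template')
#     def hello(name):
#         return dict(title='Hello', name=name)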
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.iteritems())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%try:
%from bottle import DEBUG, HTTP_CODES, request, touni
%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.output}}</pre>
%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%end
%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%end
</body>
</html>
%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%end
"""
#: A thread-safe instance of :class:`Request` representing the `current` request.
request = Request()
#: A thread-safe instance of :class:`Response` used to build the HTTP response.
response = Response()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect(__name__+'.ext', 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
print 'Bottle', __version__; sys.exit(0)
if not args:
parser.print_help()
print '\nError: No application specified.\n'
sys.exit(1)
try:
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
except (AttributeError, ImportError), e:
parser.error(e.args[0])
if opt.bind and ':' in opt.bind:
host, port = opt.bind.rsplit(':', 1)
else:
host, port = (opt.bind or 'localhost'), 8080
debug(opt.debug)
run(args[0], host=host, port=port, server=opt.server, reloader=opt.reload, plugins=opt.plugin)
# THE END
|
jk1/intellij-community
|
refs/heads/master
|
python/testData/mover/commentOut_afterDown.py
|
83
|
if True:
a = 1
else:
a = 2
#comment <caret>
|
AWhetter/gyp
|
refs/heads/master
|
test/win/gyptest-cl-pdbname-override.py
|
266
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure pdb is named as expected (shared between .cc files).
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp()
CHDIR = 'compiler-flags'
test.run_gyp('pdbname-override.gyp', chdir=CHDIR)
test.build('pdbname-override.gyp', test.ALL, chdir=CHDIR)
# Confirm that the pdb generated by the compiler was renamed (and we also
# have the linker generated one).
test.built_file_must_exist('compiler_generated.pdb', chdir=CHDIR)
test.built_file_must_exist('linker_generated.pdb', chdir=CHDIR)
test.pass_test()
|
BeyondTheClouds/nova
|
refs/heads/disco/mitaka
|
nova/scheduler/filters/type_filter.py
|
17
|
# Copyright (c) 2012 The Cloudscaling Group, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.scheduler import filters
from nova.scheduler.filters import utils
class TypeAffinityFilter(filters.BaseHostFilter):
"""TypeAffinityFilter doesn't allow more than one VM type per host.
Note: this works best with ram_weight_multiplier
(spread) set to 1 (default).
"""
def host_passes(self, host_state, spec_obj):
"""Dynamically limits hosts to one instance type
Return False if host has any instance types other than the requested
type. Return True if all instance types match or if host is empty.
"""
instance_type = spec_obj.flavor
instance_type_id = instance_type.id
other_types_on_host = utils.other_types_on_host(host_state,
instance_type_id)
return not other_types_on_host
class AggregateTypeAffinityFilter(filters.BaseHostFilter):
"""AggregateTypeAffinityFilter limits instance_type by aggregate
return True if no instance_type key is set or if the aggregate metadata
key 'instance_type' has the instance_type name as a value
"""
# Aggregate data does not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, spec_obj):
instance_type = spec_obj.flavor
aggregate_vals = utils.aggregate_values_from_key(
host_state, 'instance_type')
for val in aggregate_vals:
if (instance_type.name in
[x.strip() for x in val.split(',')]):
return True
return not aggregate_vals
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/Carbon/CarbonEvt.py
|
82
|
from _CarbonEvt import *
|
ypwalter/evennia
|
refs/heads/master
|
evennia/server/profiling/memplot.py
|
4
|
"""
Script that saves memory and idmapper data over time.
Data will be saved to game/logs/memoryusage.log. Note that
the script will append to this file if it already exists.
Call this module directly to plot the log (requires matplotlib and numpy).
"""
import os, sys
import time
#TODO!
#sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
#os.environ['DJANGO_SETTINGS_MODULE'] = 'game.settings'
import ev
from evennia.utils.idmapper import base as _idmapper
LOGFILE = "logs/memoryusage.log"
INTERVAL = 30 # log every 30 seconds
class Memplot(ev.Script):
"""
Describes a memory plotting action.
"""
def at_script_creation(self):
"Called at script creation"
self.key = "memplot"
self.desc = "Save server memory stats to file"
self.start_delay = False
self.persistent = True
self.interval = INTERVAL
self.db.starttime = time.time()
def at_repeat(self):
"Regularly save memory statistics."
pid = os.getpid()
rmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "rss")).read()) / 1000.0 # resident memory
vmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "vsz")).read()) / 1000.0 # virtual memory
total_num, cachedict = _idmapper.cache_size()
t0 = (time.time() - self.db.starttime) / 60.0 # save in minutes
with open(LOGFILE, "a") as f:
f.write("%s, %s, %s, %s\n" % (t0, rmem, vmem, int(total_num)))
if __name__ == "__main__":
# plot output from the file
from matplotlib import pyplot as pp
import numpy
data = numpy.genfromtxt("../../../game/" + LOGFILE, delimiter=",")
secs = data[:,0]
rmem = data[:,1]
vmem = data[:,2]
nobj = data[:,3]
# calculate derivative of obj creation
#oderiv = (0.5*(nobj[2:] - nobj[:-2]) / (secs[2:] - secs[:-2])).copy()
#oderiv = (0.5*(rmem[2:] - rmem[:-2]) / (secs[2:] - secs[:-2])).copy()
fig = pp.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("1000 bots (normal players with light building)")
ax1.set_xlabel("Time (mins)")
ax1.set_ylabel("Memory usage (MB)")
ax1.plot(secs, rmem, "r", label="RMEM", lw=2)
ax1.plot(secs, vmem, "b", label="VMEM", lw=2)
ax1.legend(loc="upper left")
ax2 = ax1.twinx()
ax2.plot(secs, nobj, "g--", label="objs in cache", lw=2)
#ax2.plot(secs[:-2], oderiv/60.0, "g--", label="Objs/second", lw=2)
#ax2.plot(secs[:-2], oderiv, "g--", label="Objs/second", lw=2)
ax2.set_ylabel("Number of objects")
ax2.legend(loc="lower right")
ax2.annotate("First 500 bots\nconnecting", xy=(10, 4000))
ax2.annotate("Next 500 bots\nconnecting", xy=(350,10000))
#ax2.annotate("@reload", xy=(185,600))
# # plot mem vs cachesize
# nobj, rmem, vmem = nobj[:262].copy(), rmem[:262].copy(), vmem[:262].copy()
#
# fig = pp.figure()
# ax1 = fig.add_subplot(111)
# ax1.set_title("Memory usage per cache size")
# ax1.set_xlabel("Cache size (number of objects)")
# ax1.set_ylabel("Memory usage (MB)")
# ax1.plot(nobj, rmem, "r", label="RMEM", lw=2)
# ax1.plot(nobj, vmem, "b", label="VMEM", lw=2)
#
## # empirical estimate of memory usage: rmem = 35.0 + 0.0157 * Ncache
## # Ncache = int((rmem - 35.0) / 0.0157) (rmem in MB)
#
# rderiv_aver = 0.0157
# fig = pp.figure()
# ax1 = fig.add_subplot(111)
# ax1.set_title("Relation between memory and cache size")
# ax1.set_xlabel("Memory usage (MB)")
# ax1.set_ylabel("Idmapper Cache Size (number of objects)")
# rmem = numpy.linspace(35, 2000, 2000)
# nobjs = numpy.array([int((mem - 35.0) / 0.0157) for mem in rmem])
# ax1.plot(rmem, nobjs, "r", lw=2)
pp.show()
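# Usage sketch (paths are assumptions based on the constants and the
# relative "../../../game/" prefix above): run the script inside the server
# to append to logs/memoryusage.log, then run this module directly from its
# own directory so the __main__ block above can find and plot the log.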
|
ClockWorkCoin/Code
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
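# Note: ImageMagick's -delay is measured in ticks of 1/100 s, so the value
# 10 passed above yields one frame per 0.1 s (about 10 fps over 35 frames).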
|
MTASZTAKI/ApertusVR
|
refs/heads/0.9
|
plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/examples/testEnv.py
|
4
|
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import pybullet_envs
import gym
import argparse
import pybullet as p
def test(args):
count = 0
env = gym.make(args.env)
env.env.configure(args)
print("args.render=", args.render)
if (args.render == 1):
env.render(mode="human")
env.reset()
if (args.resetbenchmark):
while (1):
env.reset()
print("p.getNumBodies()=", p.getNumBodies())
print("count=", count)
count += 1
print("action space:")
sample = env.action_space.sample()
action = sample * 0.0
print("action=")
print(action)
for i in range(args.steps):
obs, rewards, done, _ = env.step(action)
if (args.rgb):
print(env.render(mode="rgb_array"))
print("obs=")
print(obs)
print("rewards")
print(rewards)
print("done")
print(done)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', default='AntBulletEnv-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--render', help='OpenGL Visualizer', type=int, default=0)
parser.add_argument('--rgb', help='rgb_array gym rendering', type=int, default=0)
parser.add_argument('--resetbenchmark',
help='Repeat reset to show reset performance',
type=int,
default=0)
parser.add_argument('--steps', help='Number of steps', type=int, default=1)
args = parser.parse_args()
test(args)
if __name__ == '__main__':
main()
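# Example invocation (flags as defined by the parser above):
#   python testEnv.py --env AntBulletEnv-v0 --render 1 --steps 100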
|
pap/rethinkdb
|
refs/heads/next
|
test/rql_test/connections/http_support/werkzeug/testsuite/contrib/__init__.py
|
145
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.contrib
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the contrib modules.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import iter_suites
def suite():
suite = unittest.TestSuite()
for other_suite in iter_suites(__name__):
suite.addTest(other_suite)
return suite
|
aasiutin/electrum
|
refs/heads/master
|
lib/tests/test_simple_config.py
|
5
|
import ast
import sys
import os
import unittest
import tempfile
import shutil
import json
from StringIO import StringIO
from lib.simple_config import (SimpleConfig, read_system_config,
read_user_config)
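# Reading aid, derived from the tests below: configuration precedence is
# command-line options > user config > system config (and system config is
# ignored entirely when the "portable" flag is set).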
class Test_SimpleConfig(unittest.TestCase):
def setUp(self):
super(Test_SimpleConfig, self).setUp()
# make sure "read_user_config" and "user_dir" return a temporary directory.
self.electrum_dir = tempfile.mkdtemp()
# Do the same for the user dir to avoid overwriting the real configuration
# for development machines with electrum installed :)
self.user_dir = tempfile.mkdtemp()
self.options = {"electrum_path": self.electrum_dir}
self._saved_stdout = sys.stdout
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
def tearDown(self):
super(Test_SimpleConfig, self).tearDown()
# Remove the temporary directories after each test (to make sure we don't
# pollute /tmp).
shutil.rmtree(self.electrum_dir)
shutil.rmtree(self.user_dir)
# Restore the "real" stdout
sys.stdout = self._saved_stdout
def test_simple_config_key_rename(self):
"""auto_cycle was renamed auto_connect"""
fake_read_system = lambda : {}
fake_read_user = lambda _: {"auto_cycle": True}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(config.get("auto_connect"), True)
self.assertEqual(config.get("auto_cycle"), None)
fake_read_user = lambda _: {"auto_connect": False, "auto_cycle": True}
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(config.get("auto_connect"), False)
self.assertEqual(config.get("auto_cycle"), None)
def test_simple_config_command_line_overrides_everything(self):
"""Options passed by command line override all other configuration
sources"""
fake_read_system = lambda : {"electrum_path": "a"}
fake_read_user = lambda _: {"electrum_path": "b"}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(self.options.get("electrum_path"),
config.get("electrum_path"))
def test_simple_config_user_config_overrides_system_config(self):
"""Options passed in user config override system config."""
fake_read_system = lambda : {"electrum_path": self.electrum_dir}
fake_read_user = lambda _: {"electrum_path": "b"}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual("b", config.get("electrum_path"))
def test_simple_config_system_config_ignored_if_portable(self):
"""If electrum is started with the "portable" flag, system
configuration is completely ignored."""
fake_read_system = lambda : {"some_key": "some_value"}
fake_read_user = lambda _: {}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={"portable": True},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(config.get("some_key"), None)
def test_simple_config_user_config_is_used_if_others_arent_specified(self):
"""If no system-wide configuration and no command-line options are
specified, the user configuration is used instead."""
fake_read_system = lambda : {}
fake_read_user = lambda _: {"electrum_path": self.electrum_dir}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(self.options.get("electrum_path"),
config.get("electrum_path"))
def test_cannot_set_options_passed_by_command_line(self):
fake_read_system = lambda : {}
fake_read_user = lambda _: {"electrum_path": "b"}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.set_key("electrum_path", "c")
self.assertEqual(self.options.get("electrum_path"),
config.get("electrum_path"))
def test_can_set_options_from_system_config(self):
fake_read_system = lambda : {"electrum_path": self.electrum_dir}
fake_read_user = lambda _: {}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.set_key("electrum_path", "c")
self.assertEqual("c", config.get("electrum_path"))
def test_can_set_options_set_in_user_config(self):
another_path = tempfile.mkdtemp()
fake_read_system = lambda : {}
fake_read_user = lambda _: {"electrum_path": self.electrum_dir}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.set_key("electrum_path", another_path)
self.assertEqual(another_path, config.get("electrum_path"))
def test_can_set_options_from_system_config_if_portable(self):
"""If the "portable" flag is set, the user can overwrite system
configuration options."""
another_path = tempfile.mkdtemp()
fake_read_system = lambda : {"electrum_path": self.electrum_dir}
fake_read_user = lambda _: {}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={"portable": True},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.set_key("electrum_path", another_path)
self.assertEqual(another_path, config.get("electrum_path"))
def test_user_config_is_not_written_with_read_only_config(self):
"""The user config does not contain command-line options or system
options when saved."""
fake_read_system = lambda : {"something": "b"}
fake_read_user = lambda _: {"something": "a"}
read_user_dir = lambda : self.user_dir
self.options.update({"something": "c"})
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.save_user_config()
contents = None
with open(os.path.join(self.electrum_dir, "config"), "r") as f:
contents = f.read()
result = ast.literal_eval(contents)
self.assertEqual({"something": "a"}, result)
class TestSystemConfig(unittest.TestCase):
sample_conf = """
[client]
gap_limit = 5
[something_else]
everything = 42
"""
def setUp(self):
super(TestSystemConfig, self).setUp()
self.thefile = tempfile.mkstemp(suffix=".electrum.test.conf")[1]
def tearDown(self):
super(TestSystemConfig, self).tearDown()
os.remove(self.thefile)
def test_read_system_config_file_does_not_exist(self):
somefile = "/foo/I/do/not/exist/electrum.conf"
result = read_system_config(somefile)
self.assertEqual({}, result)
def test_read_system_config_file_returns_file_options(self):
with open(self.thefile, "w") as f:
f.write(self.sample_conf)
result = read_system_config(self.thefile)
self.assertEqual({"gap_limit": "5"}, result)
def test_read_system_config_file_no_sections(self):
with open(self.thefile, "w") as f:
f.write("gap_limit = 5") # The file has no sections at all
result = read_system_config(self.thefile)
self.assertEqual({}, result)
class TestUserConfig(unittest.TestCase):
def setUp(self):
super(TestUserConfig, self).setUp()
self._saved_stdout = sys.stdout
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
self.user_dir = tempfile.mkdtemp()
def tearDown(self):
super(TestUserConfig, self).tearDown()
shutil.rmtree(self.user_dir)
sys.stdout = self._saved_stdout
def test_no_path_means_no_result(self):
result = read_user_config(None)
self.assertEqual({}, result)
def test_path_with_reprd_dict(self):
thefile = os.path.join(self.user_dir, "config")
payload = {"gap_limit": 5}
with open(thefile, "w") as f:
f.write(json.dumps(payload))
result = read_user_config(self.user_dir)
self.assertEqual(payload, result)
def test_path_without_config_file(self):
"""We pass a path but if does not contain a "config" file."""
result = read_user_config(self.user_dir)
self.assertEqual({}, result)
def test_path_with_reprd_object(self):
class something(object):
pass
thefile = os.path.join(self.user_dir, "config")
payload = something()
with open(thefile, "w") as f:
f.write(repr(payload))
result = read_user_config(self.user_dir)
self.assertEqual({}, result)
|
lightwang1/beets
|
refs/heads/master
|
beetsplug/fuzzy.py
|
24
|
# This file is part of beets.
# Copyright 2015, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides a fuzzy matching query.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets.plugins import BeetsPlugin
from beets.dbcore.query import StringFieldQuery
from beets import config
import difflib
class FuzzyQuery(StringFieldQuery):
@classmethod
def string_match(cls, pattern, val):
# smartcase: a lowercase pattern matches case-insensitively
if pattern.islower():
val = val.lower()
queryMatcher = difflib.SequenceMatcher(None, pattern, val)
threshold = config['fuzzy']['threshold'].as_number()
return queryMatcher.quick_ratio() >= threshold
class FuzzyPlugin(BeetsPlugin):
def __init__(self):
super(FuzzyPlugin, self).__init__()
self.config.add({
'prefix': '~',
'threshold': 0.7,
})
def queries(self):
prefix = self.config['prefix'].get(basestring)
return {prefix: FuzzyQuery}
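# A minimal standalone sketch of the smartcase rule above (stdlib only,
# values illustrative):
#   import difflib
#   difflib.SequenceMatcher(None, 'beatles', 'the beatles').quick_ratio()
#   # -> ~0.78, which clears the default 0.7 threshold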
|
zhenzhai/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/lms/find_courses.py
|
110
|
"""
Find courses page (main page of the LMS).
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
class FindCoursesPage(PageObject):
"""
Find courses page (main page of the LMS).
"""
url = BASE_URL
def is_browser_on_page(self):
return "edX" in self.browser.title
@property
def course_id_list(self):
"""
Retrieve the list of available course IDs
on the page.
"""
return self.q(css='article.course').attrs('id')
|
rajul/mne-python
|
refs/heads/master
|
examples/datasets/plot_brainstorm_data.py
|
8
|
"""
============================
Brainstorm tutorial datasets
============================
Here we compute the evoked from raw for the Brainstorm
tutorial dataset. For comparison, see:
http://neuroimage.usc.edu/brainstorm/Tutorials/MedianNerveCtf
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID 879716,
13 pages, 2011. doi:10.1155/2011/879716
"""
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets.brainstorm import bst_raw
from mne.io import Raw
print(__doc__)
tmin, tmax, event_id = -0.1, 0.3, 2 # take right-hand somato
reject = dict(mag=4e-12, eog=250e-6)
data_path = bst_raw.data_path()
raw_fname = data_path + '/MEG/bst_raw/' + \
'subj001_somatosensory_20111109_01_AUX-f_raw.fif'
raw = Raw(raw_fname, preload=True)
raw.plot()
# set EOG channel
raw.set_channel_types({'EEG058': 'eog'})
# show power line interference and remove it
raw.plot_psd()
raw.notch_filter(np.arange(60, 181, 60))
events = mne.find_events(raw, stim_channel='UPPT001')
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Compute epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=False)
# compute evoked
evoked = epochs.average()
# remove physiological artifacts (eyeblinks, heartbeats) using SSP on baseline
evoked.add_proj(mne.compute_proj_evoked(evoked.crop(tmax=0, copy=True)))
evoked.apply_proj()
# fix stim artifact
mne.preprocessing.fix_stim_artifact(evoked)
# correct delays due to hardware (stim artifact is at 4 ms)
evoked.shift_time(-0.004)
# plot the result
evoked.plot()
# show topomaps
evoked.plot_topomap(times=np.array([0.016, 0.030, 0.060, 0.070]))
|
flake123p/ProjectH
|
refs/heads/master
|
Python/_Basics_/A01_CreateEXE/test.py
|
2
|
import urllib.request
headStr = "http://image.slidesharecdn.com/cc2500-130410225125-phpapp01/95/cc2500-"
tailStr = "-1024.jpg?cb=1365661093"
for i in range(1, 43):
#Input String
linkStr = headStr + str(i) + tailStr
#Output String
if i < 10:
outputFileStr = "00" + str(i) + ".png"
elif i < 100:
outputFileStr = "0" + str(i) + ".png"
else:
outputFileStr = str(i) + ".png"
#Get Page/File ...
r = urllib.request.urlopen(linkStr)
#Save to file
with open(outputFileStr, 'b+w') as f:
f.write(r.read())
print("Download: " + outputFileStr + " complete ...")
|
archf/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py
|
47
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Russell Teague <rteague2 () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vsan_cluster
short_description: Configure VSAN clustering on an ESXi host
description:
- This module can be used to configure VSAN clustering on an ESXi host
version_added: 2.0
author: "Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_uuid:
description:
- Desired cluster UUID
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure VMware VSAN Cluster
hosts: deploy_node
gather_facts: False
tags:
- vsan
tasks:
- name: Configure VSAN on first host
vmware_vsan_cluster:
hostname: "{{ groups['esxi'][0] }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
register: vsan_cluster
- name: Configure VSAN on remaining hosts
vmware_vsan_cluster:
hostname: "{{ item }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
with_items: "{{ groups['esxi'][1:] }}"
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (connect_to_api, get_all_objs, vmware_argument_spec,
wait_for_task)
def create_vsan_cluster(host_system, new_cluster_uuid):
host_config_manager = host_system.configManager
vsan_system = host_config_manager.vsanSystem
vsan_config = vim.vsan.host.ConfigInfo()
vsan_config.enabled = True
if new_cluster_uuid is not None:
vsan_config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
vsan_config.clusterInfo.uuid = new_cluster_uuid
vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
vsan_config.storageInfo.autoClaimStorage = True
task = vsan_system.UpdateVsan_Task(vsan_config)
changed, result = wait_for_task(task)
host_status = vsan_system.QueryHostStatus()
cluster_uuid = host_status.uuid
return changed, result, cluster_uuid
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
new_cluster_uuid = module.params['cluster_uuid']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = list(host.keys())[0]
changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
mne-tools/mne-tools.github.io
|
refs/heads/main
|
0.11/_downloads/plot_raw_objects.py
|
15
|
"""
.. _tut_raw_objects
The :class:`Raw <mne.io.RawFIF>` data structure: continuous data
================================================================
"""
from __future__ import print_function
import mne
import os.path as op
from matplotlib import pyplot as plt
###############################################################################
# Continuous data is stored in objects of type :class:`Raw <mne.io.RawFIF>`.
# The core data structure is simply a 2D numpy array (channels × samples,
# `._data`) combined with an :class:`Info <mne.io.meas_info.Info>` object
# (`.info`) (see :ref:`tut_info_objects`).
#
# The most common way to load continuous data is from a .fif file. For more
# information, see :ref:`loading data from other formats <ch_raw>` or creating
# it :ref:`from scratch <tut_creating_data_structures>`.
###############################################################################
# Loading continuous data
# -----------------------
# Load an example dataset; the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.RawFIF(data_path, preload=True, verbose=False)
# Print the sample rate
print('sample rate:', raw.info['sfreq'], 'Hz')
# Print the size of the data matrix
print('channels x samples:', raw._data.shape)
###############################################################################
# Information about the channels contained in the :class:`Raw <mne.io.RawFIF>`
# object is contained in the :class:`Info <mne.io.meas_info.Info>` attribute.
# This is essentially a dictionary with a number of relevant fields (see
# :ref:`tut_info_objects`).
###############################################################################
# Indexing data
# -------------
#
# There are two ways to access the data stored within :class:`Raw
# <mne.io.RawFIF>` objects. One is by accessing the underlying data array, and
# the other is to index the :class:`Raw <mne.io.RawFIF>` object directly.
#
# To access the data array of :class:`Raw <mne.io.Raw>` objects, use the
# `_data` attribute. Note that this is only present if `preload==True`.
print('Shape of data array:', raw._data.shape)
array_data = raw._data[0, :1000]
_ = plt.plot(array_data)
###############################################################################
# You can also pass an index directly to the :class:`Raw <mne.io.RawFIF>`
# object. This will return an array of times, as well as the data representing
# those timepoints. This may be used even if the data is not preloaded:
# Extract data from the first 5 channels, from 1 s to 3 s.
sfreq = raw.info['sfreq']
data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]
_ = plt.plot(times, data.T)
_ = plt.title('Sample channels')
###############################################################################
# -----------------------------------------
# Selecting subsets of channels and samples
# -----------------------------------------
#
# It is possible to use more intelligent indexing to extract data, using
# channel names, types or time ranges.
# Pull all MEG gradiometer channels:
# Make sure to use copy==True or it will overwrite the data
meg_only = raw.pick_types(meg=True, copy=True)
eeg_only = raw.pick_types(meg=False, eeg=True, copy=True)
# The MEG flag in particular lets you specify a string for more specificity
grad_only = raw.pick_types(meg='grad', copy=True)
# Or you can use custom channel names
pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']
specific_chans = raw.pick_channels(pick_chans, copy=True)
print(meg_only, eeg_only, grad_only, specific_chans, sep='\n')
###############################################################################
# Notice the different scalings of these types
f, (a1, a2) = plt.subplots(2, 1)
eeg, times = eeg_only[0, :int(sfreq * 2)]
meg, times = meg_only[0, :int(sfreq * 2)]
a1.plot(times, meg[0])
a2.plot(times, eeg[0])
###############################################################################
# You can restrict the data to a specific time range
restricted = raw.crop(5, 7) # in seconds
print('New time range from', restricted.times.min(), 's to',
restricted.times.max(), 's')
###############################################################################
# And drop channels by name
restricted = restricted.drop_channels(['MEG 0241', 'EEG 001'])
print('Number of channels reduced from', raw.info['nchan'], 'to',
restricted.info['nchan'])
###############################################################################
# --------------------------------------------------
# Concatenating :class:`Raw <mne.io.RawFIF>` objects
# --------------------------------------------------
#
# :class:`Raw <mne.io.RawFIF>` objects can be concatenated in time by using the
# :func:`append <mne.io.RawFIF.append>` function. For this to work, they must
# have the same number of channels and their :class:`Info
# <mne.io.meas_info.Info>` structures should be compatible.
# Create multiple :class:`Raw <mne.io.RawFIF>` objects
raw1 = raw.copy().crop(0, 10)
raw2 = raw.copy().crop(10, 20)
raw3 = raw.copy().crop(20, 100)
# Concatenate in time (also works without preloading)
raw1.append([raw2, raw3])
print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
|
ovnicraft/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/studio/__init__.py
|
232
|
import os
# Get the URL of the instance under test
BASE_URL = os.environ.get('test_url', 'http://localhost:8031')
|
gkawamoto/yowsup
|
refs/heads/master
|
yowsup/layers/axolotl/protocolentities/iq_keys_get_result.py
|
48
|
from yowsup.layers.protocol_iq.protocolentities import ResultIqProtocolEntity
from yowsup.structs import ProtocolTreeNode
from axolotl.state.prekeybundle import PreKeyBundle
from axolotl.identitykey import IdentityKey
from axolotl.ecc.curve import Curve
from axolotl.ecc.djbec import DjbECPublicKey
import binascii
import sys
class ResultGetKeysIqProtocolEntity(ResultIqProtocolEntity):
"""
<iq type="result" from="s.whatsapp.net" id="3">
<list>
<user jid="79049347231@s.whatsapp.net">
<registration>
HEX:7a9cec4b</registration>
<type>
HEX:05</type>
<identity>
HEX:eeb668c8d062c99b43560c811acfe6e492798b496767eb060d99e011d3862369</identity>
<skey>
<id>
HEX:000000</id>
<value>
HEX:a1b5216ce4678143fb20aaaa2711a8c2b647230164b79414f0550b4e611ccd6c</value>
<signature>
HEX:94c231327fcd664b34603838b5e9ba926718d71c206e92b2b400f5cf4ae7bf17d83557bf328c1be6d51efdbd731a26d000adb8f38f140b1ea2a5fd3df2688085</signature>
</skey>
<key>
<id>
HEX:36b545</id>
<value>
HEX:c20826f622bec24b349ced38f1854bdec89ba098ef4c06b2402800d33e9aff61</value>
</key>
</user>
</list>
</iq>
"""
def __init__(self, _id, preKeyBundleMap = None):
super(ResultGetKeysIqProtocolEntity, self).__init__(_from = "s.whatsapp.net", _id=_id)
self.setPreKeyBundleMap(preKeyBundleMap)
def getJids(self):
return self.preKeyBundleMap.keys()
def setPreKeyBundleMap(self, preKeyBundleMap = None):
self.preKeyBundleMap = preKeyBundleMap or {}
def setPreKeyBundleFor(self, jid, preKeyBundle):
self.preKeyBundleMap[jid] = preKeyBundle
def getPreKeyBundleFor(self, jid):
if jid in self.preKeyBundleMap:
return self.preKeyBundleMap[jid]
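# Helper pair below: _intToBytes packs an int as 4 big-endian bytes (8 hex
# digits) and _bytesToInt reads back an arbitrary-length byte string,
# matching the hex-encoded ids shown in the sample tree above.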
@staticmethod
def _intToBytes(val):
return binascii.unhexlify(format(val, 'x').zfill(8).encode())
@staticmethod
def _bytesToInt(val):
if sys.version_info >= (3,0):
valEnc = val.encode('latin-1') if type(val) is str else val
else:
valEnc = val
return int(binascii.hexlify(valEnc), 16)
@staticmethod
def encStr(string):
if sys.version_info >= (3,0) and type(string) is str:
return string.encode('latin-1')
return string
@staticmethod
def fromProtocolTreeNode(node):
entity = ResultIqProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = ResultGetKeysIqProtocolEntity
entity.setPreKeyBundleMap()
userNodes = node.getChild("list").getAllChildren()
for userNode in userNodes:
preKeyNode = userNode.getChild("key")
signedPreKeyNode = userNode.getChild("skey")
registrationId = ResultGetKeysIqProtocolEntity._bytesToInt(userNode.getChild("registration").getData())
identityKey = IdentityKey(DjbECPublicKey(ResultGetKeysIqProtocolEntity.encStr(userNode.getChild("identity").getData())))
preKeyId = ResultGetKeysIqProtocolEntity._bytesToInt(preKeyNode.getChild("id").getData())
preKeyPublic = DjbECPublicKey(ResultGetKeysIqProtocolEntity.encStr(preKeyNode.getChild("value").getData()))
signedPreKeyId = ResultGetKeysIqProtocolEntity._bytesToInt(signedPreKeyNode.getChild("id").getData())
signedPreKeySig = ResultGetKeysIqProtocolEntity.encStr(signedPreKeyNode.getChild("signature").getData())
signedPreKeyPub = DjbECPublicKey(ResultGetKeysIqProtocolEntity.encStr(signedPreKeyNode.getChild("value").getData()))
preKeyBundle = PreKeyBundle(registrationId, 1, preKeyId, preKeyPublic,
signedPreKeyId, signedPreKeyPub, signedPreKeySig, identityKey)
entity.setPreKeyBundleFor(userNode["jid"], preKeyBundle)
return entity
def toProtocolTreeNode(self):
node = super(ResultGetKeysIqProtocolEntity, self).toProtocolTreeNode()
listNode = ProtocolTreeNode("list")
node.addChild(listNode)
for jid, preKeyBundle in self.preKeyBundleMap.items():
userNode = ProtocolTreeNode("user", {"jid": jid})
registrationNode = ProtocolTreeNode("registration", data = self.__class__._intToBytes(preKeyBundle.getRegistrationId()))
typeNode = ProtocolTreeNode("type", data = self.__class__._intToBytes(Curve.DJB_TYPE))
identityNode = ProtocolTreeNode("identity", data = preKeyBundle.getIdentityKey().getPublicKey().getPublicKey())
skeyNode = ProtocolTreeNode("skey")
skeyNode_idNode = ProtocolTreeNode("id", data=self.__class__._intToBytes(preKeyBundle.getSignedPreKeyId()))
skeyNode_valueNode = ProtocolTreeNode("value", data=preKeyBundle.getSignedPreKey().getPublicKey())
skeyNode_signatureNode = ProtocolTreeNode("signature", data=preKeyBundle.getSignedPreKeySignature())
skeyNode.addChildren([skeyNode_idNode, skeyNode_valueNode, skeyNode_signatureNode])
preKeyNode = ProtocolTreeNode("key")
preKeyNode_idNode = ProtocolTreeNode("id", data = self.__class__._intToBytes(preKeyBundle.getPreKeyId()))
preKeyNode_valueNode = ProtocolTreeNode("value", data= preKeyBundle.getPreKey().getPublicKey())
preKeyNode.addChildren([preKeyNode_idNode, preKeyNode_valueNode])
userNode.addChildren([
registrationNode,
typeNode,
identityNode,
skeyNode,
preKeyNode
])
listNode.addChild(userNode)
return node
|
yaolinz/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/intermediate_dir/gyptest-intermediate-dir.py
|
243
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that targets have independent INTERMEDIATE_DIRs.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('test.gyp', chdir='src')
test.build('test.gyp', 'target1', chdir='src')
# Check stuff exists.
intermediate_file1 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
shared_intermediate_file1 = test.read('src/shared_outfile.txt')
test.must_contain(shared_intermediate_file1, 'shared_target1')
test.run_gyp('test2.gyp', chdir='src')
# Force the shared intermediate to be rebuilt.
test.sleep()
test.touch('src/shared_infile.txt')
test.build('test2.gyp', 'target2', chdir='src')
# Check INTERMEDIATE_DIR file didn't get overwritten but SHARED_INTERMEDIATE_DIR
# file did.
intermediate_file2 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
test.must_contain(intermediate_file2, 'target2')
shared_intermediate_file2 = test.read('src/shared_outfile.txt')
if shared_intermediate_file1 != shared_intermediate_file2:
test.fail_test(shared_intermediate_file1 + ' != ' + shared_intermediate_file2)
test.must_contain(shared_intermediate_file1, 'shared_target2')
test.must_contain(shared_intermediate_file2, 'shared_target2')
test.pass_test()
|
tsabi/Odoo-tsabi-fixes
|
refs/heads/master
|
addons/account_analytic_plans/report/crossovered_analytic.py
|
321
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class crossovered_analytic(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(crossovered_analytic, self).__init__(cr, uid, name, context = context)
self.localcontext.update( {
'time': time,
'lines': self._lines,
'ref_lines': self._ref_lines,
'find_children': self.find_children,
})
self.base_amount = 0.00
def find_children(self, ref_ids):
if not ref_ids: return []
to_return_ids = []
final_list = []
parent_list = []
set_list = []
analytic_obj = self.pool.get('account.analytic.account')
for id in ref_ids:
# to avoid duplicate entries
if id not in to_return_ids:
to_return_ids.append(analytic_obj.search(self.cr,self.uid,[('parent_id','child_of',[id])]))
data_accnt = analytic_obj.browse(self.cr,self.uid,to_return_ids[0])
for data in data_accnt:
if data.parent_id and data.parent_id.id == ref_ids[0]:
parent_list.append(data.id)
final_list.append(ref_ids[0])
set_list = self.set_account(parent_list)
final_list.extend(set_list)
return final_list #to_return_ids[0]
def set_account(self, cats):
lst = []
category = self.pool.get('account.analytic.account').read(self.cr, self.uid, cats)
for cat in category:
lst.append(cat['id'])
if cat['child_ids']:
lst.extend(self.set_account(cat['child_ids']))
return lst
def _ref_lines(self, form):
result = []
res = {}
acc_pool = self.pool.get('account.analytic.account')
line_pool = self.pool.get('account.analytic.line')
self.dict_acc_ref = {}
if form['journal_ids']:
journal = " in (" + ','.join(map(lambda x: str(x), form['journal_ids'])) + ")"
else:
journal = 'is not null'
query_general = "SELECT id FROM account_analytic_line WHERE (journal_id " + journal +") AND date>='"+ str(form['date1']) +"'"" AND date<='" + str(form['date2']) + "'"
self.cr.execute(query_general)
l_ids = self.cr.fetchall()
line_ids = [x[0] for x in l_ids]
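# Note: these queries are assembled by string concatenation from form values;
# psycopg2-style parameter passing (self.cr.execute(query, params)) would be
# the safer pattern here.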
obj_line = line_pool.browse(self.cr,self.uid,line_ids)
# This structure will be useful for easily knowing which account_analytic_line records are related to the reference account. For this purpose, we save the move_id of the analytic lines.
self.dict_acc_ref[form['ref']] = []
children_list = acc_pool.search(self.cr, self.uid, [('parent_id', 'child_of', [form['ref']])])
for obj in obj_line:
if obj.account_id.id in children_list:
if obj.move_id and obj.move_id.id not in self.dict_acc_ref[form['ref']]:
self.dict_acc_ref[form['ref']].append(obj.move_id.id)
res['ref_name'] = acc_pool.name_get(self.cr, self.uid, [form['ref']])[0][1]
res['ref_code'] = acc_pool.browse(self.cr, self.uid, form['ref']).code
self.final_list = children_list
selected_ids = line_pool.search(self.cr, self.uid, [('account_id', 'in' ,self.final_list)])
res['ref_qty'] = 0.0
res['ref_amt'] = 0.0
self.base_amount = 0.0
if selected_ids:
query = "SELECT SUM(aal.amount) AS amt, SUM(aal.unit_amount) AS qty FROM account_analytic_line AS aal, account_analytic_account AS aaa \
WHERE aal.account_id = aaa.id AND aal.id IN ("+','.join(map(str,selected_ids))+") AND (aal.journal_id " + journal +") AND aal.date>='"+ str(form['date1']) +"'"" AND aal.date<='" + str(form['date2']) + "'"
self.cr.execute(query)
info=self.cr.dictfetchall()
res['ref_qty'] = info[0]['qty']
res['ref_amt'] = info[0]['amt']
self.base_amount = info[0]['amt']
result.append(res)
return result
def _lines(self, form, ids=None):
if ids is None:
ids = {}
if not ids:
ids = self.ids
if form['journal_ids']:
journal=" in (" + ','.join(map(lambda x: str(x), form['journal_ids'])) + ")"
else:
journal= 'is not null'
acc_pool = self.pool.get('account.analytic.account')
line_pool = self.pool.get('account.analytic.line')
acc_id = []
final = []
self.list_ids = []
self.final_list = self.find_children(ids)
for acc_id in self.final_list:
selected_ids = line_pool.search(self.cr, self.uid, [('account_id','=',acc_id), ('move_id', 'in', self.dict_acc_ref[form['ref']])])
if selected_ids:
query="SELECT aaa.code AS code, SUM(aal.amount) AS amt, SUM(aal.unit_amount) AS qty, aaa.name AS acc_name, aal.account_id AS id FROM account_analytic_line AS aal, account_analytic_account AS aaa \
WHERE aal.account_id=aaa.id AND aal.id IN ("+','.join(map(str,selected_ids))+") AND (aal.journal_id " + journal +") AND aal.date>='"+ str(form['date1']) +"'"" AND aal.date<='" + str(form['date2']) + "'"" GROUP BY aal.account_id,aaa.name,aaa.code ORDER BY aal.account_id"
self.cr.execute(query)
res = self.cr.dictfetchall()
if res:
for element in res:
if self.base_amount != 0.00:
element['perc'] = (element['amt'] / self.base_amount) * 100.00
else:
element['perc'] = 0.00
else:
result = {}
res = []
result['id'] = acc_id
data_account = acc_pool.browse(self.cr, self.uid, acc_id)
result['acc_name'] = data_account.name
result['code'] = data_account.code
result['amt'] = result['qty'] = result['perc'] = 0.00
if not form['empty_line']:
res.append(result)
else:
result = {}
res = []
result['id'] = acc_id
data_account = acc_pool.browse(self.cr, self.uid, acc_id)
result['acc_name'] = data_account.name
result['code'] = data_account.code
result['amt'] = result['qty'] = result['perc'] = 0.00
if not form['empty_line']:
res.append(result)
for item in res:
obj_acc = acc_pool.name_get(self.cr,self.uid,[item['id']])
item['acc_name'] = obj_acc[0][1]
final.append(item)
return final
class report_crossoveredanalyticplans(osv.AbstractModel):
_name = 'report.account_analytic_plans.report_crossoveredanalyticplans'
_inherit = 'report.abstract_report'
_template = 'account_analytic_plans.report_crossoveredanalyticplans'
_wrapped_report_class = crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
huran2014/huran.github.io
|
refs/heads/master
|
wot_gateway/usr/lib/python2.7/timeit.py
|
99
|
#! /usr/bin/env python
"""Tool for measuring execution time of small code snippets.
This module avoids a number of common traps for measuring execution
times. See also Tim Peters' introduction to the Algorithms chapter in
the Python Cookbook, published by O'Reilly.
Library usage: see the Timer class.
Command line usage:
python timeit.py [-n N] [-r N] [-s S] [-t] [-c] [-h] [--] [statement]
Options:
-n/--number N: how many times to execute 'statement' (default: see below)
-r/--repeat N: how many times to repeat the timer (default 3)
-s/--setup S: statement to be executed once initially (default 'pass')
-t/--time: use time.time() (default on Unix)
-c/--clock: use time.clock() (default on Windows)
-v/--verbose: print raw timing results; repeat for more digits precision
-h/--help: print this usage message and exit
--: separate options from statement, use when statement starts with -
statement: statement to be timed (default 'pass')
A multi-line statement may be given by specifying each line as a
separate argument; indented lines are possible by enclosing an
argument in quotes and using leading spaces. Multiple -s options are
treated similarly.
If -n is not given, a suitable number of loops is calculated by trying
successive powers of 10 until the total time is at least 0.2 seconds.
The difference in default timer function is because on Windows,
clock() has microsecond granularity but time()'s granularity is 1/60th
of a second; on Unix, clock() has 1/100th of a second granularity and
time() is much more precise. On either platform, the default timer
functions measure wall clock time, not the CPU time. This means that
other processes running on the same computer may interfere with the
timing. The best thing to do when accurate timing is necessary is to
repeat the timing a few times and use the best time. The -r option is
good for this; the default of 3 repetitions is probably enough in most
cases. On Unix, you can use clock() to measure CPU time.
Note: there is a certain baseline overhead associated with executing a
pass statement. The code here doesn't try to hide it, but you should
be aware of it. The baseline overhead can be measured by invoking the
program without arguments.
The baseline overhead differs between Python versions! Also, to
fairly compare older Python versions to Python 2.3, you may want to
use python -O for the older versions to avoid timing SET_LINENO
instructions.
"""
import gc
import sys
import time
try:
import itertools
except ImportError:
# Must be an older Python version (see timeit() below)
itertools = None
__all__ = ["Timer"]
dummy_src_name = "<timeit-src>"
default_number = 1000000
default_repeat = 3
if sys.platform == "win32":
# On Windows, the best timer is time.clock()
default_timer = time.clock
else:
# On most other platforms the best timer is time.time()
default_timer = time.time
# Don't change the indentation of the template; the reindent() calls
# in Timer.__init__() depend on setup being indented 4 spaces and stmt
# being indented 8 spaces.
template = """
def inner(_it, _timer):
%(setup)s
_t0 = _timer()
for _i in _it:
%(stmt)s
_t1 = _timer()
return _t1 - _t0
"""
def reindent(src, indent):
"""Helper to reindent a multi-line statement."""
return src.replace("\n", "\n" + " "*indent)
def _template_func(setup, func):
"""Create a timer function. Used if the "statement" is a callable."""
def inner(_it, _timer, _func=func):
setup()
_t0 = _timer()
for _i in _it:
_func()
_t1 = _timer()
return _t1 - _t0
return inner
class Timer:
"""Class for timing execution speed of small code snippets.
The constructor takes a statement to be timed, an additional
statement used for setup, and a timer function. Both statements
default to 'pass'; the timer function is platform-dependent (see
module doc string).
To measure the execution time of the first statement, use the
timeit() method. The repeat() method is a convenience to call
timeit() multiple times and return a list of results.
The statements may contain newlines, as long as they don't contain
multi-line string literals.
"""
def __init__(self, stmt="pass", setup="pass", timer=default_timer):
"""Constructor. See class doc string."""
self.timer = timer
ns = {}
if isinstance(stmt, basestring):
stmt = reindent(stmt, 8)
if isinstance(setup, basestring):
setup = reindent(setup, 4)
src = template % {'stmt': stmt, 'setup': setup}
elif hasattr(setup, '__call__'):
src = template % {'stmt': stmt, 'setup': '_setup()'}
ns['_setup'] = setup
else:
raise ValueError("setup is neither a string nor callable")
self.src = src # Save for traceback display
code = compile(src, dummy_src_name, "exec")
exec code in globals(), ns
self.inner = ns["inner"]
elif hasattr(stmt, '__call__'):
self.src = None
if isinstance(setup, basestring):
_setup = setup
def setup():
exec _setup in globals(), ns
elif not hasattr(setup, '__call__'):
raise ValueError("setup is neither a string nor callable")
self.inner = _template_func(setup, stmt)
else:
raise ValueError("stmt is neither a string nor callable")
def print_exc(self, file=None):
"""Helper to print a traceback from the timed code.
Typical use:
t = Timer(...) # outside the try/except
try:
t.timeit(...) # or t.repeat(...)
except:
t.print_exc()
The advantage over the standard traceback is that source lines
in the compiled template will be displayed.
The optional file argument directs where the traceback is
sent; it defaults to sys.stderr.
"""
import linecache, traceback
if self.src is not None:
linecache.cache[dummy_src_name] = (len(self.src),
None,
self.src.split("\n"),
dummy_src_name)
# else the source is already stored somewhere else
traceback.print_exc(file=file)
def timeit(self, number=default_number):
"""Time 'number' executions of the main statement.
To be precise, this executes the setup statement once, and
then returns the time it takes to execute the main statement
a number of times, as a float measured in seconds. The
argument is the number of times through the loop, defaulting
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
if itertools:
it = itertools.repeat(None, number)
else:
it = [None] * number
gcold = gc.isenabled()
gc.disable()
try:
timing = self.inner(it, self.timer)
finally:
if gcold:
gc.enable()
return timing
def repeat(self, repeat=default_repeat, number=default_number):
"""Call timeit() a few times.
This is a convenience function that calls the timeit()
repeatedly, returning a list of results. The first argument
specifies how many times to call timeit(), defaulting to 3;
the second argument specifies the timer argument, defaulting
to one million.
Note: it's tempting to calculate mean and standard deviation
from the result vector and report these. However, this is not
very useful. In a typical case, the lowest value gives a
lower bound for how fast your machine can run the given code
snippet; higher values in the result vector are typically not
caused by variability in Python's speed, but by other
processes interfering with your timing accuracy. So the min()
of the result is probably the only number you should be
interested in. After that, you should look at the entire
vector and apply common sense rather than statistics.
"""
r = []
for i in range(repeat):
t = self.timeit(number)
r.append(t)
return r
def timeit(stmt="pass", setup="pass", timer=default_timer,
number=default_number):
"""Convenience function to create Timer object and call timeit method."""
return Timer(stmt, setup, timer).timeit(number)
def repeat(stmt="pass", setup="pass", timer=default_timer,
repeat=default_repeat, number=default_number):
"""Convenience function to create Timer object and call repeat method."""
return Timer(stmt, setup, timer).repeat(repeat, number)
def main(args=None):
"""Main program, used when run as a script.
The optional argument specifies the command line to be parsed,
defaulting to sys.argv[1:].
The return value is an exit code to be passed to sys.exit(); it
may be None to indicate success.
When an exception happens during timing, a traceback is printed to
stderr and the return value is 1. Exceptions at other times
(including the template compilation) are not caught.
"""
if args is None:
args = sys.argv[1:]
import getopt
try:
opts, args = getopt.getopt(args, "n:s:r:tcvh",
["number=", "setup=", "repeat=",
"time", "clock", "verbose", "help"])
except getopt.error, err:
print err
print "use -h/--help for command line help"
return 2
timer = default_timer
stmt = "\n".join(args) or "pass"
number = 0 # auto-determine
setup = []
repeat = default_repeat
verbose = 0
precision = 3
for o, a in opts:
if o in ("-n", "--number"):
number = int(a)
if o in ("-s", "--setup"):
setup.append(a)
if o in ("-r", "--repeat"):
repeat = int(a)
if repeat <= 0:
repeat = 1
if o in ("-t", "--time"):
timer = time.time
if o in ("-c", "--clock"):
timer = time.clock
if o in ("-v", "--verbose"):
if verbose:
precision += 1
verbose += 1
if o in ("-h", "--help"):
print __doc__,
return 0
setup = "\n".join(setup) or "pass"
# Include the current directory, so that local imports work (sys.path
# contains the directory of this script, rather than the current
# directory)
import os
sys.path.insert(0, os.curdir)
t = Timer(stmt, setup, timer)
if number == 0:
# determine number so that 0.2 <= total time < 2.0
for i in range(1, 10):
number = 10**i
try:
x = t.timeit(number)
except:
t.print_exc()
return 1
if verbose:
print "%d loops -> %.*g secs" % (number, precision, x)
if x >= 0.2:
break
try:
r = t.repeat(repeat, number)
except:
t.print_exc()
return 1
best = min(r)
if verbose:
print "raw times:", " ".join(["%.*g" % (precision, x) for x in r])
print "%d loops," % number,
usec = best * 1e6 / number
if usec < 1000:
print "best of %d: %.*g usec per loop" % (repeat, precision, usec)
else:
msec = usec / 1000
if msec < 1000:
print "best of %d: %.*g msec per loop" % (repeat, precision, msec)
else:
sec = msec / 1000
print "best of %d: %.*g sec per loop" % (repeat, precision, sec)
return None
if __name__ == "__main__":
sys.exit(main())
|
aewallin/opencamlib
|
refs/heads/master
|
examples/python/drop-cutter/batchdropcutter_mtrush.py
|
1
|
import ocl
import pyocl
import camvtk
import time
import vtk
import datetime
import math
if __name__ == "__main__":
print(ocl.version())
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../stl/gnu_tux_mod.stl")
stl = camvtk.STLSurf("../stl/mount_rush.stl")
#stl = camvtk.STLSurf("../stl/pycam-textbox.stl")
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((0.5,0.5,0.5))
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print("STL surface with", s.size(), "triangles read")
# define a cutter
length=5
cutter = ocl.BallCutter(15.4321, length)
#cutter = ocl.CylCutter(1.123, length)
#cutter = ocl.BullCutter(1.123, 0.2, length)
#cutter = ocl.ConeCutter(0.43, math.pi/7, length)
print(cutter)
# define grid of CL-points
minx=-42
dx=0.5
maxx=47
miny=-27
dy=0.2
maxy=20
z=-55
clpoints = pyocl.CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
print("generated grid with", len(clpoints)," CL-points")
# batchdropcutter
bdc1 = ocl.BatchDropCutter()
bdc1.setSTL(s)
bdc1.setCutter(cutter)
# push the points to ocl
for p in clpoints:
bdc1.appendPoint(p)
# run the actual calculation
t_before = time.time()
bdc1.run()
t_after = time.time()
calctime = t_after-t_before
print(" done in ", calctime," s" )
# get back results from ocl
clpts = bdc1.getCLPoints()
# draw the results
print("rendering...",)
camvtk.drawCLPointCloud(myscreen, clpts)
print("done")
myscreen.camera.SetPosition(25, 23, 15)
myscreen.camera.SetFocalPoint(4, 5, 0)
# ocl text
t = camvtk.Text()
t.SetText("OpenCAMLib")
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
# other text
t2 = camvtk.Text()
stltext = "%i triangles\n%i CL-points\n%0.1f seconds" % (s.size(), len(clpts), calctime)
t2.SetText(stltext)
t2.SetPos( (50, myscreen.height-100) )
myscreen.addActor( t2)
t3 = camvtk.Text()
ctext = "Cutter: %s" % ( str(cutter) )
t3.SetText(ctext)
t3.SetPos( (50, myscreen.height-150) )
myscreen.addActor( t3)
myscreen.render()
myscreen.iren.Start()
raw_input("Press Enter to terminate")
|
vmora/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/grass7/ext/r_resamp_filter.py
|
12
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_resamp_filter.py
------------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def checkParameterValuesBeforeExecuting(alg, parameters, context):
""" Verify if we have the right parameters """
radius = alg.parameterAsString(parameters, 'radius', context)
x_radius = alg.parameterAsString(parameters, 'x_radius', context)
y_radius = alg.parameterAsString(parameters, 'y_radius', context)
if (not radius and not x_radius and not y_radius) or (radius and (x_radius or y_radius)):
return False, alg.tr("You need to set either radius or x_radius and y_radius!")
elif (x_radius and not y_radius) or (y_radius and not x_radius):
return False, alg.tr("You need to set x_radius and y_radius!")
return True, None
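# Accepted parameter combinations, per the checks above:
#   radius alone -> OK; x_radius plus y_radius -> OK; any other mix -> error.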
|
costadorione/purestream
|
refs/heads/master
|
servers/dailymotion.py
|
1
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------------------------------------------------
# streamondemand - XBMC Plugin
# Connector for dailymotion
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ---------------------------------------------------------------------------------------------------------------------
import re
from core import logger
from core import scrapertools
DEFAULT_HEADERS = [["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0"]]
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("streamondemand.servers.dailymotion get_video_url(page_url='%s')" % page_url)
video_urls = []
data, headers = scrapertools.read_body_and_headers(page_url, headers=DEFAULT_HEADERS)
data = data.replace("\\", "")
'''
"240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
'''
patron = '"([^"]+)":\[\{"type":"video/([^"]+)","url":"([^"]+)"\}\]'
matches = scrapertools.find_multiple_matches(data, patron)
subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
for cookie in headers:
if cookie[0] == "set-cookie":
header_cookie = cookie[1]
DEFAULT_HEADERS.append(['Cookie', header_cookie])
for stream_name, stream_type, stream_url in matches:
stream_url = scrapertools.get_header_from_response(stream_url, header_to_get="location",
headers=DEFAULT_HEADERS)
video_urls.append([stream_name + "p ." + stream_type + " [dailymotion]", stream_url, 0, subtitle])
for video_url in video_urls:
logger.info("streamondemand.servers.dailymotion %s - %s" % (video_url[0], video_url[1]))
return video_urls
# Find this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://www.dailymotion.com/embed/video/xrva9o
# http://www.dailymotion.com/swf/video/xocczx
# http://www.dailymotion.com/swf/x17idxo&related=0
# http://www.dailymotion.com/video/xrva9o
patronvideos = 'dailymotion.com/(?:video/|swf/(?:video/|)|)(?:embed/video/|)([A-z0-9]+)'
logger.info("streamondemand.servers.dailymotion find_videos #" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
titulo = "[dailymotion]"
url = "http://www.dailymotion.com/embed/video/" + match
if url not in encontrados:
logger.info(" url=" + url)
devuelve.append([titulo, url, 'dailymotion'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
return devuelve
|
camilonova/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_iriencode.py
|
133
|
from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
"""
Ensure iriencode keeps safe strings.
"""
@setup({'iriencode01': '{{ url|iriencode }}'})
def test_iriencode01(self):
output = self.engine.render_to_string('iriencode01', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode02(self):
output = self.engine.render_to_string('iriencode02', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode03': '{{ url|iriencode }}'})
def test_iriencode03(self):
output = self.engine.render_to_string('iriencode03', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode04(self):
output = self.engine.render_to_string('iriencode04', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
def test_unicode(self):
self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'), 'S%C3%B8r-Tr%C3%B8ndelag')
def test_urlencoded(self):
self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')), 'fran%C3%A7ois%20%26%20jill')
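if __name__ == '__main__':
    # A minimal sketch of the behaviour the tests above assert, run directly
    # against the filter functions. The package-relative `..utils` import
    # keeps this file from executing standalone, so treat this as an
    # illustration rather than part of the suite.
    print(iriencode('S\xf8r-Tr\xf8ndelag'))            # S%C3%B8r-Tr%C3%B8ndelag
    print(iriencode(urlencode('fran\xe7ois & jill')))  # fran%C3%A7ois%20%26%20jill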
|
iglpdc/nipype
|
refs/heads/master
|
nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py
|
12
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..featuredetection import TextureFromNoiseImageFilter
def test_TextureFromNoiseImageFilter_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputRadius=dict(argstr='--inputRadius %d',
),
inputVolume=dict(argstr='--inputVolume %s',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
terminal_output=dict(nohash=True,
),
)
inputs = TextureFromNoiseImageFilter.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_TextureFromNoiseImageFilter_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = TextureFromNoiseImageFilter.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/tangible/mission/quest_item/shared_jatrian_lytus_q3_needed.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_jatrian_lytus_q3_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_dant_n","jatrian_lytus_q3_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
MerleLK/StudentSystem
|
refs/heads/master
|
student/models.py
|
1
|
from django.db import models
from teacher.models import CourseInfo
# Create your models here.
# Student information table
class StudentMessage(models.Model):
id = models.BigIntegerField(primary_key=True)
name = models.CharField(max_length=30)
age = models.IntegerField()
sex = models.CharField(max_length=10, default='MAN')
grade = models.CharField(max_length=10)
discipline = models.CharField(max_length=30)
class_code = models.CharField(max_length=30)
def __str__(self):
return self.name
class Elective(models.Model):
course_id = models.ForeignKey(CourseInfo, related_name="course_elective")
student_id = models.ForeignKey(StudentMessage, related_name="elective_student")
score = models.FloatField(default=60)
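# A minimal usage sketch, assuming migrations have run and the teacher app
# holds at least one CourseInfo row; every name and value below is
# illustrative only.
#
#   student = StudentMessage.objects.create(
#       id=20170001, name='Li Lei', age=20, grade='2017',
#       discipline='Computer Science', class_code='CS-1701')
#   course = CourseInfo.objects.first()
#   Elective.objects.create(course_id=course, student_id=student, score=88.5)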
|
resmo/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/fortios/argspec/facts/facts.py
|
20
|
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
"""
The arg spec for the fortios monitor module.
"""
class FactsArgs(object):
""" The arg spec for the fortios monitor module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": False},
"gather_subset": {
"required": True, "type": "list", "elements": "dict",
"options": {
"fact": {"required": True, "type": "str"},
"filters": {"required": False, "type": "list", "elements": "dict"}
}
}
}
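# A minimal sketch of task arguments that satisfy this spec -- gather_subset
# is the only required option. The fact name below is a placeholder, not a
# documented value.
#
#   module_args = {
#       "vdom": "root",
#       "gather_subset": [
#           {"fact": "system_status_select", "filters": []},
#       ],
#   }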
|
vFense/vFenseAgent-nix
|
refs/heads/development
|
agent/deps/mac/Python-2.7.5/lib/python2.7/plat-mac/lib-scriptpackages/Netscape/Standard_Suite.py
|
82
|
"""Suite Standard Suite: Common terms for most applications
Level 1, version 1
Generated from /Volumes/Sap/Applications (Mac OS 9)/Netscape Communicator\xe2\x84\xa2 Folder/Netscape Communicator\xe2\x84\xa2
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'CoRe'
from StdSuites.Standard_Suite import *
class Standard_Suite_Events(Standard_Suite_Events):
def close(self, _object, _attributes={}, **_arguments):
"""close: Close an object
Required argument: the objects to close
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'clos'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def data_size(self, _object, _attributes={}, **_arguments):
"""data size: Return the size in bytes of an object
Required argument: the object whose data size is to be returned
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the size of the object in bytes
"""
_code = 'core'
_subcode = 'dsiz'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def get(self, _object, _attributes={}, **_arguments):
"""get: Get the data for an object
Required argument: the object whose data is to be returned
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: The data from the object
"""
_code = 'core'
_subcode = 'getd'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_set = {
'to' : 'data',
}
def set(self, _object, _attributes={}, **_arguments):
"""set: Set an object\xd5s data
Required argument: the object to change
Keyword argument to: the new value
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'setd'
aetools.keysubst(_arguments, self._argmap_set)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
class application(aetools.ComponentItem):
"""application - An application program """
want = 'capp'
class _Prop_alert_application(aetools.NProperty):
"""alert application - Most of the alerts will be sent to this application using yet unspecified AE interface. We need a few alert boxes: alert, confirm and notify. Any ideas on how to design this event? mailto:atotic@netscape.com. I\xd5d like to conform to the standard. """
which = 'ALAP'
want = 'type'
alert_application = _Prop_alert_application()
class _Prop_kiosk_mode(aetools.NProperty):
"""kiosk mode - Kiosk mode leaves very few menus enabled """
which = 'KOSK'
want = 'long'
kiosk_mode = _Prop_kiosk_mode()
# element 'cwin' as ['indx', 'name', 'ID ']
class window(aetools.ComponentItem):
"""window - A Window """
want = 'cwin'
class _Prop_URL(aetools.NProperty):
"""URL - Current URL """
which = 'curl'
want = 'TEXT'
class _Prop_bounds(aetools.NProperty):
"""bounds - the boundary rectangle for the window """
which = 'pbnd'
want = 'qdrt'
class _Prop_busy(aetools.NProperty):
"""busy - Is window loading something right now. 2, window is busy and will reject load requests. 1, window is busy, but will interrupt outstanding loads """
which = 'busy'
want = 'long'
class _Prop_closeable(aetools.NProperty):
"""closeable - Does the window have a close box? """
which = 'hclb'
want = 'bool'
class _Prop_floating(aetools.NProperty):
"""floating - Does the window float? """
which = 'isfl'
want = 'bool'
class _Prop_index(aetools.NProperty):
"""index - the number of the window """
which = 'pidx'
want = 'long'
class _Prop_modal(aetools.NProperty):
"""modal - Is the window modal? """
which = 'pmod'
want = 'bool'
class _Prop_name(aetools.NProperty):
"""name - the title of the window """
which = 'pnam'
want = 'itxt'
class _Prop_position(aetools.NProperty):
"""position - upper left coordinates of window """
which = 'ppos'
want = 'QDpt'
class _Prop_resizable(aetools.NProperty):
"""resizable - Is the window resizable? """
which = 'prsz'
want = 'bool'
class _Prop_titled(aetools.NProperty):
"""titled - Does the window have a title bar? """
which = 'ptit'
want = 'bool'
class _Prop_unique_ID(aetools.NProperty):
"""unique ID - Window\xd5s unique ID (a bridge between WWW! suite window id\xd5s and standard AE windows) """
which = 'wiid'
want = 'long'
class _Prop_visible(aetools.NProperty):
"""visible - is the window visible? """
which = 'pvis'
want = 'bool'
class _Prop_zoomable(aetools.NProperty):
"""zoomable - Is the window zoomable? """
which = 'iszm'
want = 'bool'
class _Prop_zoomed(aetools.NProperty):
"""zoomed - Is the window zoomed? """
which = 'pzum'
want = 'bool'
application._superclassnames = []
application._privpropdict = {
'alert_application' : _Prop_alert_application,
'kiosk_mode' : _Prop_kiosk_mode,
}
application._privelemdict = {
'window' : window,
}
window._superclassnames = []
window._privpropdict = {
'URL' : _Prop_URL,
'bounds' : _Prop_bounds,
'busy' : _Prop_busy,
'closeable' : _Prop_closeable,
'floating' : _Prop_floating,
'index' : _Prop_index,
'modal' : _Prop_modal,
'name' : _Prop_name,
'position' : _Prop_position,
'resizable' : _Prop_resizable,
'titled' : _Prop_titled,
'unique_ID' : _Prop_unique_ID,
'visible' : _Prop_visible,
'zoomable' : _Prop_zoomable,
'zoomed' : _Prop_zoomed,
}
window._privelemdict = {
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'capp' : application,
'cwin' : window,
}
_propdeclarations = {
'ALAP' : _Prop_alert_application,
'KOSK' : _Prop_kiosk_mode,
'busy' : _Prop_busy,
'curl' : _Prop_URL,
'hclb' : _Prop_closeable,
'isfl' : _Prop_floating,
'iszm' : _Prop_zoomable,
'pbnd' : _Prop_bounds,
'pidx' : _Prop_index,
'pmod' : _Prop_modal,
'pnam' : _Prop_name,
'ppos' : _Prop_position,
'prsz' : _Prop_resizable,
'ptit' : _Prop_titled,
'pvis' : _Prop_visible,
'pzum' : _Prop_zoomed,
'wiid' : _Prop_unique_ID,
}
_compdeclarations = {
}
_enumdeclarations = {
}
|
ojengwa/odoo
|
refs/heads/8.0
|
addons/l10n_br/__init__.py
|
430
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import account
|
lucidmotifs/auto-aoc
|
refs/heads/master
|
.venv/lib/python3.5/site-packages/pip/_vendor/requests/cookies.py
|
355
|
# -*- coding: utf-8 -*-
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = self._r.headers['Host']
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
"""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific.
"""
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1).
"""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar.
.. seealso:: itervalues() and iteritems().
"""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar.
.. seealso:: values() and items().
"""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar.
.. seealso:: keys() and items().
"""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar.
.. seealso:: iterkeys() and itervalues().
"""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
vanilla python dict of key value pairs.
.. seealso:: keys() and values().
"""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
"""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements.
:rtype: dict
"""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
or cookie.path == path):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
return super(RequestsCookieJar, self).__contains__(name)
except CookieConflictError:
return True
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1).
"""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead.
"""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``.
"""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
    def _find_no_duplicates(self, name, domain=None, path=None):
        """Both ``__getitem__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
try:
expires = int(time.time() + int(morsel['max-age']))
except ValueError:
raise TypeError('max-age: %s must be integer' % morsel['max-age'])
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = calendar.timegm(
time.strptime(morsel['expires'], time_template)
)
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
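if __name__ == '__main__':
    # A minimal sketch of the dict-like surface this module layers over
    # cookielib.CookieJar, using only the public helpers defined above.
    # The package-relative imports keep this file from running standalone.
    jar = cookiejar_from_dict({'session': 'abc123'})
    jar.set('theme', 'dark', domain='example.com', path='/')
    print(jar['session'])   # abc123
    print(jar.get_dict())   # {'session': 'abc123', 'theme': 'dark'}
    merged = merge_cookies(RequestsCookieJar(), {'lang': 'en'})
    print(dict(merged))     # {'lang': 'en'}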
|
damdam-s/OpenUpgrade
|
refs/heads/8.0
|
addons/web_api/__openerp__.py
|
384
|
{
'name': 'OpenERP Web API',
'category': 'Hidden',
'description': """
OpenERP Web API.
================
""",
'version': '2.0',
'depends': ['web'],
'installable': True,
'auto_install': False,
}
|
eayunstack/rally
|
refs/heads/product
|
tests/unit/verification/test_json2html.py
|
6
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime as dt
import mock
from rally.verification.tempest import json2html
from tests.unit import test
BASE = "rally.verification.tempest"
class HtmlOutputTestCase(test.TestCase):
@mock.patch(BASE + ".json2html.ui_utils.get_template")
def test_generate_report(self, mock_get_template):
results = {
"time": 22.75,
"tests": 4,
"success": 1,
"skipped": 1,
"failures": 1,
"expected_failures": 0,
"unexpected_success": 0,
"test_cases": {
"tp": {"name": "tp",
"status": "success",
"time": 2},
"ts": {"name": "ts",
"status": "skip",
"reason": "ts_skip",
"time": 4},
"tf": {"name": "tf",
"status": "fail",
"time": 6,
"traceback": "fail_log"}}}
expected_report = {
"failures": 1,
"success": 1,
"skipped": 1,
"expected_failures": 0,
"unexpected_success": 0,
"total": 4,
"time": "{0} ({1} s)".format(
dt.timedelta(seconds=23), 22.75),
"tests": [{"name": "tf",
"id": 0,
"output": "fail_log",
"status": "fail",
"time": 6},
{"name": "tp",
"id": 1,
"output": "",
"status": "success",
"time": 2},
{"name": "ts",
"id": 2,
"output": "Reason:\n ts_skip",
"status": "skip",
"time": 4}]}
json2html.generate_report(results)
mock_get_template.assert_called_once_with("verification/report.mako")
mock_get_template.return_value.render.assert_called_once_with(
report=expected_report)
@mock.patch(BASE + ".json2html.ui_utils.get_template")
def test_convert_bug_id_in_reason_into_bug_link(self, mock_get_template):
results = {
"failures": 0,
"success": 0,
"skipped": 1,
"expected_failures": 0,
"unexpected_success": 0,
"tests": 1,
"time": 0,
"test_cases": {"one_test": {
"status": "skip",
"name": "one_test",
"reason": "Skipped until Bug: 666666 is resolved.",
"time": "time"}}}
expected_report = {
"failures": 0,
"success": 0,
"skipped": 1,
"expected_failures": 0,
"unexpected_success": 0,
"total": 1,
"time": "{0} ({1} s)".format(dt.timedelta(seconds=0), 0),
"tests": [{
"id": 0,
"status": "skip",
"name": "one_test",
"output": "Reason:\n Skipped until Bug: <a href='https://"
"launchpad.net/bugs/666666'>666666</a> is resolved.",
"time": "time"}]}
json2html.generate_report(results)
mock_get_template.assert_called_once_with("verification/report.mako")
mock_get_template.return_value.render.assert_called_once_with(
report=expected_report)
|
t0in4/django
|
refs/heads/master
|
tests/indexes/tests.py
|
321
|
from unittest import skipUnless
from django.db import connection
from django.test import TestCase
from .models import Article, ArticleTranslation, IndexTogetherSingleList
class SchemaIndexesTests(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
with connection.schema_editor() as editor:
index_name = editor._create_index_name(
model=Article,
column_names=("c1", "c2", "c3"),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_7ce4cc86123")
def test_index_together(self):
editor = connection.schema_editor()
index_sql = editor._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted
self.assertIn(
connection.ops.quote_name(
editor._create_index_name(Article, ['headline', 'pub_date'], suffix='_idx')
),
index_sql[0]
)
def test_index_together_single_list(self):
# Test for using index_together with a single list (#22172)
index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'postgresql',
"This is a postgresql-specific issue")
def test_postgresql_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = connection.schema_editor()._model_indexes_sql(IndexedArticle)
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[2])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
@skipUnless(connection.vendor == 'postgresql',
"This is a postgresql-specific issue")
def test_postgresql_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'mysql', "This is a mysql-specific issue")
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys.
(#14180).
"""
storage = connection.introspection.get_storage_engine(
connection.cursor(), ArticleTranslation._meta.db_table
)
if storage != "InnoDB":
            self.skipTest("This test only applies to the InnoDB storage engine")
index_sql = connection.schema_editor()._model_indexes_sql(ArticleTranslation)
self.assertEqual(index_sql, [])
|
cloud9UG/odoo
|
refs/heads/8.0
|
addons/mail/wizard/__init__.py
|
438
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import invite
import mail_compose_message
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
skg-net/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_networkinterface_facts.py
|
10
|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_networkinterface_facts
version_added: "2.1"
short_description: Get network interface facts.
description:
- Get facts for a specific network interface or all network interfaces within a resource group.
options:
name:
description:
- Only show results for a specific network interface.
resource_group:
description:
- Name of the resource group containing the network interface(s). Required when searching by name.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one network interface
azure_rm_networkinterface_facts:
resource_group: Testing
name: nic001
- name: Get network interfaces within a resource group
azure_rm_networkinterface_facts:
resource_group: Testing
- name: Get network interfaces by tag
azure_rm_networkinterface_facts:
resource_group: Testing
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_networkinterfaces:
description: List of network interface dicts.
returned: always
type: list
example: [{
"dns_settings": {
"applied_dns_servers": [],
"dns_servers": [],
"internal_dns_name_label": null,
"internal_fqdn": null
},
"enable_ip_forwarding": false,
"etag": 'W/"59726bfc-08c4-44ed-b900-f6a559876a9d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic003",
"ip_configuration": {
"name": "default",
"private_ip_address": "10.10.0.4",
"private_ip_allocation_method": "Dynamic",
"public_ip_address": {
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/publicip001",
"name": "publicip001"
},
"subnet": {
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/vnet001/subnets/subnet001",
"name": "subnet001",
"virtual_network_name": "vnet001"
}
},
"location": "westus",
"mac_address": null,
"name": "nic003",
"network_security_group": {
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001",
"name": "secgroup001"
},
"primary": null,
"provisioning_state": "Succeeded",
"tags": {},
"type": "Microsoft.Network/networkInterfaces"
}]
''' # NOQA
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'NetworkInterface'
class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_networkinterfaces=[])
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
if self.name:
self.results['ansible_facts']['azure_networkinterfaces'] = self.get_item()
elif self.resource_group:
self.results['ansible_facts']['azure_networkinterfaces'] = self.list_resource_group()
else:
self.results['ansible_facts']['azure_networkinterfaces'] = self.list_all()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
result = []
item = None
try:
item = self.network_client.network_interfaces.get(self.resource_group, self.name)
except:
pass
if item and self.has_tags(item.tags, self.tags):
nic = self.serialize_obj(item, AZURE_OBJECT_CLASS)
result = [nic]
return result
def list_resource_group(self):
self.log('List for resource group')
try:
response = self.network_client.network_interfaces.list(self.resource_group)
except Exception as exc:
self.fail("Error listing by resource group {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
nic = self.serialize_obj(item, AZURE_OBJECT_CLASS)
results.append(nic)
return results
def list_all(self):
self.log('List all')
try:
response = self.network_client.network_interfaces.list_all()
except Exception as exc:
self.fail("Error listing all - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
nic = self.serialize_obj(item, AZURE_OBJECT_CLASS)
results.append(nic)
return results
def main():
AzureRMNetworkInterfaceFacts()
if __name__ == '__main__':
main()
|
guessit-io/guessit
|
refs/heads/develop
|
guessit/yamlutils.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Options
"""
from collections import OrderedDict
import babelfish
import yaml # pylint:disable=wrong-import-order
from .rules.common.quantity import BitRate, FrameRate, Size
class OrderedDictYAMLLoader(yaml.SafeLoader):
"""
A YAML loader that loads mappings into ordered dictionaries.
From https://gist.github.com/enaeseth/844388
"""
def __init__(self, *args, **kwargs):
yaml.SafeLoader.__init__(self, *args, **kwargs)
self.add_constructor('tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor('tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else: # pragma: no cover
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc: # pragma: no cover
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)'
% exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
class CustomDumper(yaml.SafeDumper):
"""
Custom YAML Dumper.
"""
pass # pylint:disable=unnecessary-pass
def default_representer(dumper, data):
"""Default representer"""
return dumper.represent_str(str(data))
CustomDumper.add_representer(babelfish.Language, default_representer)
CustomDumper.add_representer(babelfish.Country, default_representer)
CustomDumper.add_representer(BitRate, default_representer)
CustomDumper.add_representer(FrameRate, default_representer)
CustomDumper.add_representer(Size, default_representer)
def ordered_dict_representer(dumper, data):
"""OrderedDict representer"""
return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())
CustomDumper.add_representer(OrderedDict, ordered_dict_representer)
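if __name__ == '__main__':
    # A minimal sketch: OrderedDictYAMLLoader preserves source key order,
    # which plain safe_load historically did not guarantee, and CustomDumper
    # serialises the result back out.
    doc = yaml.load('b: 1\na: 2\nc: 3', Loader=OrderedDictYAMLLoader)
    print(list(doc.items()))  # [('b', 1), ('a', 2), ('c', 3)]
    print(yaml.dump(doc, Dumper=CustomDumper, default_flow_style=False))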
|
gkc1000/pyscf
|
refs/heads/master
|
pyscf/cc/test/test_gccsd_t.py
|
2
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import copy
from functools import reduce
from pyscf import gto, scf, lib, symm
from pyscf import cc
from pyscf.cc import uccsd_t
from pyscf.cc import gccsd_t
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -.757 , .587)],
[1 , (0. , .757 , .587)]]
mol.spin = 2
mol.basis = '3-21g'
mol.symmetry = 'C2v'
mol.build()
mol1 = copy.copy(mol)
mol1.symmetry = False
mf = scf.UHF(mol1).run(conv_tol=1e-14)
myucc = cc.UCCSD(mf).run()
mygcc = cc.GCCSD(mf).run()
def tearDownModule():
global mol, mol1, mf, myucc, mygcc
mol.stdout.close()
del mol, mol1, mf, myucc, mygcc
class KnownValues(unittest.TestCase):
def test_gccsd_t_compare_uccsd_t(self):
self.assertAlmostEqual(myucc.ccsd_t(), mygcc.ccsd_t(t1=None), 7)
def test_gccsd_t(self):
mf1 = copy.copy(mf)
nao, nmo = mf.mo_coeff[0].shape
numpy.random.seed(10)
mf1.mo_coeff = numpy.random.random((2,nao,nmo))
numpy.random.seed(12)
nocca, noccb = mol.nelec
nmo = mf1.mo_occ[0].size
nvira = nmo - nocca
nvirb = nmo - noccb
t1a = .1 * numpy.random.random((nocca,nvira))
t1b = .1 * numpy.random.random((noccb,nvirb))
t2aa = .1 * numpy.random.random((nocca,nocca,nvira,nvira))
t2aa = t2aa - t2aa.transpose(0,1,3,2)
t2aa = t2aa - t2aa.transpose(1,0,2,3)
t2bb = .1 * numpy.random.random((noccb,noccb,nvirb,nvirb))
t2bb = t2bb - t2bb.transpose(0,1,3,2)
t2bb = t2bb - t2bb.transpose(1,0,2,3)
t2ab = .1 * numpy.random.random((nocca,noccb,nvira,nvirb))
mycc = cc.GCCSD(mf1)
t1 = mycc.spatial2spin((t1a, t1b ))
t2 = mycc.spatial2spin((t2aa, t2ab, t2bb))
eris = mycc.ao2mo()
e3a = gccsd_t.kernel(mycc, eris, t1, t2)
self.assertAlmostEqual(e3a, 9877.2780859693339, 6)
def test_gccsd_t_complex(self):
nocc, nvir = 4, 6
nmo = nocc + nvir
numpy.random.seed(1)
eris = cc.gccsd._PhysicistsERIs()
h = (numpy.random.random((nmo,nmo)) +
numpy.random.random((nmo,nmo)) * .6j - .5-.3j)
eris.fock = h + h.T.conj() + numpy.diag(numpy.arange(nmo)) * 2
eri1 = (numpy.random.random((nmo,nmo,nmo,nmo)) +
numpy.random.random((nmo,nmo,nmo,nmo))*.8j - .5-.4j)
eri1 = eri1 - eri1.transpose(0,1,3,2)
eri1 = eri1 - eri1.transpose(1,0,2,3)
eri1 = eri1 + eri1.transpose(2,3,0,1).conj()
eris.ovvv = eri1[:nocc,nocc:,nocc:,nocc:]
eris.oovv = eri1[:nocc,:nocc,nocc:,nocc:]
eris.ooov = eri1[:nocc,:nocc,:nocc,nocc:]
t2 = (numpy.random.random((nocc,nocc,nvir,nvir)) +
numpy.random.random((nocc,nocc,nvir,nvir))*.8j - .5-.4j)
t2 = t2 - t2.transpose(0,1,3,2)
t2 = t2 - t2.transpose(1,0,2,3)
t1 = (numpy.random.random((nocc,nvir)) +
numpy.random.random((nocc,nvir))*.8j - .5-.4j)
eris.mo_energy = eris.fock.diagonal().real
gcc = cc.gccsd.GCCSD(scf.GHF(gto.M()))
self.assertAlmostEqual(gccsd_t.kernel(gcc, eris, t1, t2),
(-104.15886718888137+0.30739952563327672j), 9)
if __name__ == "__main__":
print("Full Tests for GCCSD(T)")
unittest.main()
|
luuhfelix/ProjetoFilmes
|
refs/heads/master
|
backend/appengine/routes/comentario/admin/home.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from tekton import router
from gaecookie.decorator import no_csrf
from course_app import facade
from routes.trailers.admin import new, edit
def delete(_handler, course_id):
facade.delete_course_cmd(course_id)()
_handler.redirect(router.to_path(index))
@no_csrf
def index():
cmd = facade.list_courses_cmd()
courses = cmd()
edit_path = router.to_path(edit)
delete_path = router.to_path(delete)
short_form = facade.course_short_form()
def short_course_dict(course):
course_dct = short_form.fill_with_model(course)
course_dct['edit_path'] = router.to_path(edit_path, course_dct['id'])
course_dct['delete_path'] = router.to_path(delete_path, course_dct['id'])
return course_dct
short_courses = [short_course_dict(course) for course in courses]
context = {'trailers': short_courses,
'new_path': router.to_path(new)}
return TemplateResponse(context)
|
pheanex/ansible-modules-core
|
refs/heads/devel
|
cloud/amazon/ec2_metric_alarm.py
|
61
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms
- Metrics you wish to alarm on must already exist
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
    choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
    choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
    choices: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions', {})
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed=True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
if getattr(alarm, attr) != action:
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError, e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict'),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
main()
|
alexhenrie/poedit
|
refs/heads/master
|
deps/boost/tools/build/test/space_in_path.py
|
6
|
#!/usr/bin/python
# Copyright 2012 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that paths containing spaces are handled correctly by actions.
import BoostBuild
import os
t = BoostBuild.Tester(use_test_config=False)
t.write("has space/jamroot.jam", """\
import testing ;
unit-test test : test.cpp ;
actions write-file
{
@(STDOUT:E=okay) >"$(<)"
}
make test.txt : : @write-file ;
""")
t.write("has space/test.cpp", "int main() {}\n")
tmpdir = t.workpath("has space")
try:
    oldtmp = os.environ["TMP"]
except KeyError:
    oldtmp = None
try:
    oldtmpdir = os.environ["TMPDIR"]
except KeyError:
    oldtmpdir = None
os.environ["TMP"] = tmpdir  # Windows
os.environ["TMPDIR"] = tmpdir  # *nix
try:
t.run_build_system(["has space"])
t.expect_addition("has space/bin/test.txt")
t.expect_addition("has space/bin/$toolset/debug*/test.passed")
finally:
if oldtmp is not None:
os.environ["TMP"] = oldtmp
else:
del os.environ["TMP"]
if oldtmpdir is not None:
os.environ["TMPDIR"] = oldtmpdir
else:
del os.environ["TMPDIR"]
t.cleanup()
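# The save/patch/restore dance above can be written more compactly in plain
# Python test code -- a sketch, assuming unittest.mock is acceptable here:
#
#   from unittest.mock import patch
#   with patch.dict(os.environ, {"TMP": tmpdir, "TMPDIR": tmpdir}):
#       t.run_build_system(["has space"])
#
# patch.dict restores the prior environment on exit, covering both the
# key-existed and key-absent cases handled manually above.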
|