repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
jswope00/griffinx | refs/heads/master | common/lib/calc/calc/tests/test_preview.py | 257 | # -*- coding: utf-8 -*-
"""
Unit tests for preview.py
"""
import unittest
from calc import preview
import pyparsing
class LatexRenderedTest(unittest.TestCase):
    """
    Test the initializing code for LatexRendered.
    Specifically that it stores the correct data and handles parens well.
    """
    def test_simple(self):
        """
        Test that the data values are stored without changing.
        """
        math = 'x^2'
        obj = preview.LatexRendered(math, tall=True)
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(obj.latex, math)
        self.assertEqual(obj.sans_parens, math)
        self.assertEqual(obj.tall, True)

    def _each_parens(self, with_parens, math, parens, tall=False):
        """
        Helper method to test the way parens are wrapped.

        `with_parens` is the expected `latex` value, `math` the raw input.
        """
        obj = preview.LatexRendered(math, parens=parens, tall=tall)
        self.assertEqual(obj.latex, with_parens)
        self.assertEqual(obj.sans_parens, math)
        self.assertEqual(obj.tall, tall)

    def test_parens(self):
        """ Test curvy parens. """
        self._each_parens('(x+y)', 'x+y', '(')

    def test_brackets(self):
        """ Test brackets. """
        self._each_parens('[x+y]', 'x+y', '[')

    def test_squiggles(self):
        """ Test curly braces. """
        self._each_parens(r'\{x+y\}', 'x+y', '{')

    def test_parens_tall(self):
        """ Test curvy parens with the tall parameter. """
        self._each_parens(r'\left(x^y\right)', 'x^y', '(', tall=True)

    def test_brackets_tall(self):
        """ Test brackets, also tall. """
        self._each_parens(r'\left[x^y\right]', 'x^y', '[', tall=True)

    def test_squiggles_tall(self):
        """ Test tall curly braces. """
        self._each_parens(r'\left\{x^y\right\}', 'x^y', '{', tall=True)

    def test_bad_parens(self):
        """ Check that we get an error with invalid parens. """
        # NOTE(review): assertRaisesRegexp is the Python 2.7 spelling; on
        # Python 3.2+ the preferred, non-deprecated name is assertRaisesRegex.
        with self.assertRaisesRegexp(Exception, 'Unknown parenthesis'):
            preview.LatexRendered('x^2', parens='not parens')
class LatexPreviewTest(unittest.TestCase):
    """
    Run integrative tests for `latex_preview`.
    All functionality was tested `RenderMethodsTest`, but see if it combines
    all together correctly.
    """
    def test_no_input(self):
        """
        With no input (including just whitespace), see that no error is thrown.
        """
        # assertEqual replaces the deprecated assertEquals alias throughout.
        self.assertEqual('', preview.latex_preview(''))
        self.assertEqual('', preview.latex_preview(' '))
        self.assertEqual('', preview.latex_preview(' \t '))

    def test_number_simple(self):
        """ Simple numbers should pass through. """
        self.assertEqual(preview.latex_preview('3.1415'), '3.1415')

    def test_number_suffix(self):
        """ Suffixes should be escaped. """
        self.assertEqual(preview.latex_preview('1.618k'), r'1.618\text{k}')

    def test_number_sci_notation(self):
        """ Numbers with scientific notation should display nicely """
        self.assertEqual(
            preview.latex_preview('6.0221413E+23'),
            r'6.0221413\!\times\!10^{+23}'
        )
        self.assertEqual(
            preview.latex_preview('-6.0221413E+23'),
            r'-6.0221413\!\times\!10^{+23}'
        )

    def test_number_sci_notation_suffix(self):
        """ Test numbers with both of these. """
        self.assertEqual(
            preview.latex_preview('6.0221413E+23k'),
            r'6.0221413\!\times\!10^{+23}\text{k}'
        )
        self.assertEqual(
            preview.latex_preview('-6.0221413E+23k'),
            r'-6.0221413\!\times\!10^{+23}\text{k}'
        )

    def test_variable_simple(self):
        """ Simple valid variables should pass through. """
        self.assertEqual(preview.latex_preview('x', variables=['x']), 'x')

    def test_greek(self):
        """ Variable names that are greek should be formatted accordingly. """
        self.assertEqual(preview.latex_preview('pi'), r'\pi')

    def test_variable_subscript(self):
        """ Things like 'epsilon_max' should display nicely """
        self.assertEqual(
            preview.latex_preview('epsilon_max', variables=['epsilon_max']),
            r'\epsilon_{max}'
        )

    def test_function_simple(self):
        """ Valid function names should be escaped. """
        self.assertEqual(
            preview.latex_preview('f(3)', functions=['f']),
            r'\text{f}(3)'
        )

    def test_function_tall(self):
        r""" Functions surrounding a tall element should have \left, \right """
        self.assertEqual(
            preview.latex_preview('f(3^2)', functions=['f']),
            r'\text{f}\left(3^{2}\right)'
        )

    def test_function_sqrt(self):
        """ Sqrt function should be handled specially. """
        self.assertEqual(preview.latex_preview('sqrt(3)'), r'\sqrt{3}')

    def test_function_log10(self):
        """ log10 function should be handled specially. """
        self.assertEqual(preview.latex_preview('log10(3)'), r'\log_{10}(3)')

    def test_function_log2(self):
        """ log2 function should be handled specially. """
        self.assertEqual(preview.latex_preview('log2(3)'), r'\log_2(3)')

    def test_power_simple(self):
        """ Powers should wrap the elements with braces correctly. """
        self.assertEqual(preview.latex_preview('2^3^4'), '2^{3^{4}}')

    def test_power_parens(self):
        """ Powers should ignore the parenthesis of the last math. """
        self.assertEqual(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}')

    def test_parallel(self):
        r""" Parallel items should combine with '\|'. """
        self.assertEqual(preview.latex_preview('2||3'), r'2\|3')

    def test_product_mult_only(self):
        r""" Simple products should combine with a '\cdot'. """
        self.assertEqual(preview.latex_preview('2*3'), r'2\cdot 3')

    def test_product_big_frac(self):
        # Raw docstring: '\f' in a plain string literal is a form-feed escape.
        r""" Division should combine with '\frac'. """
        self.assertEqual(
            preview.latex_preview('2*3/4/5'),
            r'\frac{2\cdot 3}{4\cdot 5}'
        )

    def test_product_single_frac(self):
        """ Division should ignore parens if they are extraneous. """
        self.assertEqual(
            preview.latex_preview('(2+3)/(4+5)'),
            r'\frac{2+3}{4+5}'
        )

    def test_product_keep_going(self):
        # Raw docstring: '\f' in a plain string literal is a form-feed escape.
        r"""
        Complex products/quotients should split into many '\frac's when needed.
        """
        self.assertEqual(
            preview.latex_preview('2/3*4/5*6'),
            r'\frac{2}{3}\cdot \frac{4}{5}\cdot 6'
        )

    def test_sum(self):
        """ Sums should combine its elements. """
        # Use 'x' as the first term (instead of, say, '1'), so it can't be
        # interpreted as a negative number.
        self.assertEqual(
            preview.latex_preview('-x+2-3+4', variables=['x']),
            '-x+2-3+4'
        )

    def test_sum_tall(self):
        """ A complicated expression should not hide the tallness. """
        self.assertEqual(
            preview.latex_preview('(2+3^2)'),
            r'\left(2+3^{2}\right)'
        )

    def test_complicated(self):
        """
        Given complicated input, ensure that exactly the correct string is made.
        """
        self.assertEqual(
            preview.latex_preview('11*f(x)+x^2*(3||4)/sqrt(pi)'),
            r'11\cdot \text{f}(x)+\frac{x^{2}\cdot (3\|4)}{\sqrt{\pi}}'
        )
        self.assertEqual(
            preview.latex_preview('log10(1+3/4/Cos(x^2)*(x+1))',
                                  case_sensitive=True),
            (r'\log_{10}\left(1+\frac{3}{4\cdot \text{Cos}\left(x^{2}\right)}'
             r'\cdot (x+1)\right)')
        )

    def test_syntax_errors(self):
        """
        Test a lot of math strings that give syntax errors

        Rather than have a lot of self.assertRaises, make a loop and keep track
        of those that do not throw a `ParseException`, and assert at the end.
        """
        bad_math_list = [
            '11+',
            '11*',
            'f((x)',
            'sqrt(x^)',
            '3f(x)',  # Not 3*f(x)
            '3|4',
            '3|||4'
        ]
        bad_exceptions = {}
        for math in bad_math_list:
            try:
                preview.latex_preview(math)
            except pyparsing.ParseException:
                pass  # This is what we were expecting. (not excepting :P)
            except Exception as error:  # pragma: no cover
                bad_exceptions[math] = error
            else:  # pragma: no cover
                # If there is no exception thrown, this is a problem
                bad_exceptions[math] = None
        self.assertEqual({}, bad_exceptions)
|
codeforamerica/skillcamp | refs/heads/master | ENV/lib/python2.7/site-packages/setuptools/command/upload_docs.py | 332 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
import os
import socket
import zipfile
import tempfile
import sys
import shutil
from base64 import standard_b64encode
from pkg_resources import iter_entry_points
from distutils import log
from distutils.errors import DistutilsOptionError
from distutils.command.upload import upload
from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3
errors = 'surrogateescape' if PY3 else 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
    """Return *s* as bytes, encoding text with the module-level error policy."""
    if not isinstance(s, unicode):
        # Already bytes (or another non-text value): pass through untouched.
        return s
    return s.encode(encoding, errors)
class upload_docs(upload):
    """Distutils command that zips built documentation and POSTs it to PyPI.

    Reuses the repository/credential handling of the standard ``upload``
    command; only the payload construction and endpoint action differ.
    """

    description = 'Upload documentation to PyPI'

    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server'),
        ('upload-dir=', None, 'directory to upload'),
        ]
    boolean_options = upload.boolean_options

    def has_sphinx(self):
        # Truthy only when no explicit --upload-dir was given AND a
        # build_sphinx command is installed; otherwise implicitly returns None.
        if self.upload_dir is None:
            for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
                return True

    sub_commands = [('build_sphinx', has_sphinx)]

    def initialize_options(self):
        # Standard distutils hook: set option defaults before parsing.
        upload.initialize_options(self)
        self.upload_dir = None
        self.target_dir = None

    def finalize_options(self):
        # Resolve target_dir: explicit --upload-dir wins, then the Sphinx
        # builder output, then <build_base>/docs.
        upload.finalize_options(self)
        if self.upload_dir is None:
            if self.has_sphinx():
                build_sphinx = self.get_finalized_command('build_sphinx')
                self.target_dir = build_sphinx.builder_target_dir
            else:
                build = self.get_finalized_command('build')
                self.target_dir = os.path.join(build.build_base, 'docs')
        else:
            self.ensure_dirname('upload_dir')
            self.target_dir = self.upload_dir
        self.announce('Using upload directory %s' % self.target_dir)

    def create_zipfile(self, filename):
        """Zip the contents of self.target_dir into *filename*.

        Raises DistutilsOptionError if the top level of the tree is empty.
        """
        zip_file = zipfile.ZipFile(filename, "w")
        try:
            self.mkpath(self.target_dir)  # just in case
            for root, dirs, files in os.walk(self.target_dir):
                if root == self.target_dir and not files:
                    raise DistutilsOptionError(
                        "no files found in upload directory '%s'"
                        % self.target_dir)
                for name in files:
                    full = os.path.join(root, name)
                    # Archive paths are stored relative to the upload root.
                    relative = root[len(self.target_dir):].lstrip(os.path.sep)
                    dest = os.path.join(relative, name)
                    zip_file.write(full, dest)
        finally:
            zip_file.close()

    def run(self):
        """Build the docs (via sub-commands), zip them and upload the zip."""
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        tmp_dir = tempfile.mkdtemp()
        name = self.distribution.metadata.get_name()
        zip_file = os.path.join(tmp_dir, "%s.zip" % name)
        try:
            self.create_zipfile(zip_file)
            self.upload_file(zip_file)
        finally:
            # Always clean up the temporary zip, even on upload failure.
            shutil.rmtree(tmp_dir)

    def upload_file(self, filename):
        """POST *filename* to the repository as a multipart doc_upload request."""
        f = open(filename, 'rb')
        content = f.read()
        f.close()
        meta = self.distribution.metadata
        data = {
            ':action': 'doc_upload',
            'name': meta.get_name(),
            'content': (os.path.basename(filename), content),
        }
        # set up the authentication
        credentials = b(self.username + ':' + self.password)
        credentials = standard_b64encode(credentials)
        if PY3:
            credentials = credentials.decode('ascii')
        auth = "Basic " + credentials

        # Build up the MIME payload for the POST data
        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = b('\n--') + b(boundary)
        end_boundary = sep_boundary + b('--')
        body = []
        for key, values in iteritems(data):
            title = '\nContent-Disposition: form-data; name="%s"' % key
            # handle multiple entries for the same name
            if not isinstance(values, list):
                values = [values]
            for value in values:
                if type(value) is tuple:
                    # (filename, bytes) pairs become file parts.
                    title += '; filename="%s"' % value[0]
                    value = value[1]
                else:
                    value = b(value)
                body.append(sep_boundary)
                body.append(b(title))
                body.append(b("\n\n"))
                body.append(value)
                if value and value[-1:] == b('\r'):
                    body.append(b('\n'))  # write an extra newline (lurve Macs)
        body.append(end_boundary)
        body.append(b("\n"))
        body = b('').join(body)

        self.announce("Submitting documentation to %s" % (self.repository),
                      log.INFO)

        # build the Request
        # We can't use urllib2 since we need to send the Basic
        # auth right with the first request
        schema, netloc, url, params, query, fragments = \
            urlparse(self.repository)
        assert not params and not query and not fragments
        if schema == 'http':
            conn = httplib.HTTPConnection(netloc)
        elif schema == 'https':
            conn = httplib.HTTPSConnection(netloc)
        else:
            raise AssertionError("unsupported schema "+schema)

        data = ''
        try:
            conn.connect()
            conn.putrequest("POST", url)
            content_type = 'multipart/form-data; boundary=%s' % boundary
            conn.putheader('Content-type', content_type)
            conn.putheader('Content-length', str(len(body)))
            conn.putheader('Authorization', auth)
            conn.endheaders()
            conn.send(body)
        except socket.error:
            e = sys.exc_info()[1]
            self.announce(str(e), log.ERROR)
            return

        r = conn.getresponse()
        if r.status == 200:
            self.announce('Server response (%s): %s' % (r.status, r.reason),
                          log.INFO)
        elif r.status == 301:
            # PyPI redirects to the hosted location on success.
            location = r.getheader('Location')
            if location is None:
                location = 'https://pythonhosted.org/%s/' % meta.get_name()
            self.announce('Upload successful. Visit %s' % location,
                          log.INFO)
        else:
            self.announce('Upload failed (%s): %s' % (r.status, r.reason),
                          log.ERROR)
        if self.show_response:
            # NOTE(review): on Python 3, r.read() returns bytes, so this prints
            # a bytes repr — presumably acceptable for a debug dump; confirm.
            print('-'*75, r.read(), '-'*75)
|
ehashman/oh-mainline | refs/heads/master | vendor/packages/Django/django/dispatch/__init__.py | 571 | """Multi-consumer multi-producer dispatching mechanism
Originally based on pydispatch (BSD) http://pypi.python.org/pypi/PyDispatcher/2.0.1
See license.txt for original license.
Heavily modified for Django's purposes.
"""
from django.dispatch.dispatcher import Signal, receiver |
yongtang/tensorflow | refs/heads/master | tensorflow/python/estimator/gc.py | 40 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""gc python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator import gc
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
gc.__all__ = [s for s in dir(gc) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.gc import *
|
maxwelllincoln/edcs-datascraper | refs/heads/master | src/statistics.py | 1 | import csv
from .base_fetch_n_parse import BaseFetchNParse
class Statistics(BaseFetchNParse):
    """Scrape the EDCS volume-statistics table and expose it as dict or CSV."""

    URL = "http://db.edcs.eu/epigr/hinweise/liste.html"

    def _parse(self, soup):
        """Build {'index': {volume: count}, 'list': [{volume, count}, ...]}
        from the page's <tr>/<td> table.
        """
        result = {"index": {}, "list": []}
        # Skip the header row; stop at the first blank spacer row.
        for row in soup.find_all('tr')[1:]:
            if row.string == "\n":
                break
            columns = row.find_all('td')
            volume_name = columns[0].string
            raw_count = columns[1].string
            # Counts use '.' as a thousands separator; '-' or a missing cell
            # means "unknown" and is recorded as the sentinel -1.
            if raw_count is None or raw_count == "-":
                numeric_count = -1
            else:
                numeric_count = int(raw_count.replace(".", ""))
            result['index'][volume_name] = numeric_count
            result['list'].append({'volume': volume_name, 'count': numeric_count})
        return result

    def csv(self, output):
        """Write the parsed volume/count pairs to *output* as a CSV file."""
        with open(output, 'w', newline='') as handle:
            parsed = self.parse()
            writer = csv.writer(handle, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(['volume', 'count'])
            for entry in parsed['list']:
                writer.writerow([entry['volume'], entry['count']])
|
Zhongqilong/mykbengineer | refs/heads/master | kbe/src/lib/python/Lib/test/test_string.py | 75 | import unittest, string
from test import support
class ModuleTest(unittest.TestCase):
    """Tests for the string module: constants, capwords(), and string.Formatter."""

    def test_attrs(self):
        # Bare attribute access is enough: a missing constant raises
        # AttributeError and fails the test.
        string.whitespace
        string.ascii_lowercase
        string.ascii_uppercase
        string.ascii_letters
        string.digits
        string.hexdigits
        string.octdigits
        string.punctuation
        string.printable

    def test_capwords(self):
        # capwords splits on whitespace (or the given sep), capitalizes each
        # word, and rejoins — collapsing runs of whitespace unless sep is given.
        self.assertEqual(string.capwords('abc def ghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('abc\tdef\nghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('abc\t   def  \nghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('ABC DEF GHI'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('ABC-DEF-GHI', '-'), 'Abc-Def-Ghi')
        self.assertEqual(string.capwords('ABC-def DEF-ghi GHI'), 'Abc-def Def-ghi Ghi')
        self.assertEqual(string.capwords('   aBc  DeF   '), 'Abc Def')
        self.assertEqual(string.capwords('\taBc\tDeF\t'), 'Abc Def')
        self.assertEqual(string.capwords('\taBc\tDeF\t', '\t'), '\tAbc\tDef\t')

    def test_basic_formatter(self):
        fmt = string.Formatter()
        self.assertEqual(fmt.format("foo"), "foo")
        self.assertEqual(fmt.format("foo{0}", "bar"), "foobar")
        self.assertEqual(fmt.format("foo{1}{0}-{1}", "bar", 6), "foo6bar-6")

    def test_auto_numbering(self):
        # Formatter must match str.format for auto-numbered fields, and must
        # reject mixing auto-numbered '{}' with explicit '{1}' fields.
        fmt = string.Formatter()
        self.assertEqual(fmt.format('foo{}{}', 'bar', 6),
                         'foo{}{}'.format('bar', 6))
        self.assertEqual(fmt.format('foo{1}{num}{1}', None, 'bar', num=6),
                         'foo{1}{num}{1}'.format(None, 'bar', num=6))
        self.assertEqual(fmt.format('{:^{}}', 'bar', 6),
                         '{:^{}}'.format('bar', 6))
        self.assertEqual(fmt.format('{:^{pad}}{}', 'foo', 'bar', pad=6),
                         '{:^{pad}}{}'.format('foo', 'bar', pad=6))
        with self.assertRaises(ValueError):
            fmt.format('foo{1}{}', 'bar', 6)
        with self.assertRaises(ValueError):
            fmt.format('foo{}{1}', 'bar', 6)

    def test_conversion_specifiers(self):
        fmt = string.Formatter()
        self.assertEqual(fmt.format("-{arg!r}-", arg='test'), "-'test'-")
        self.assertEqual(fmt.format("{0!s}", 'test'), 'test')
        # Unknown conversion character must raise.
        self.assertRaises(ValueError, fmt.format, "{0!h}", 'test')
        # issue13579
        self.assertEqual(fmt.format("{0!a}", 42), '42')
        self.assertEqual(fmt.format("{0!a}", string.ascii_letters),
                         "'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'")
        self.assertEqual(fmt.format("{0!a}", chr(255)), "'\\xff'")
        self.assertEqual(fmt.format("{0!a}", chr(256)), "'\\u0100'")

    def test_name_lookup(self):
        fmt = string.Formatter()
        class AnyAttr:
            # Every attribute access returns the attribute name itself.
            def __getattr__(self, attr):
                return attr
        x = AnyAttr()
        self.assertEqual(fmt.format("{0.lumber}{0.jack}", x), 'lumberjack')
        with self.assertRaises(AttributeError):
            fmt.format("{0.lumber}{0.jack}", '')

    def test_index_lookup(self):
        fmt = string.Formatter()
        lookup = ["eggs", "and", "spam"]
        self.assertEqual(fmt.format("{0[2]}{0[0]}", lookup), 'spameggs')
        with self.assertRaises(IndexError):
            fmt.format("{0[2]}{0[0]}", [])
        with self.assertRaises(KeyError):
            fmt.format("{0[2]}{0[0]}", {})

    def test_override_get_value(self):
        class NamespaceFormatter(string.Formatter):
            # NOTE(review): mutable default 'namespace={}' is shared across
            # instances, and the else-branch neither passes self nor returns —
            # harmless here since the test only uses string keys.
            def __init__(self, namespace={}):
                string.Formatter.__init__(self)
                self.namespace = namespace

            def get_value(self, key, args, kwds):
                if isinstance(key, str):
                    try:
                        # Check explicitly passed arguments first
                        return kwds[key]
                    except KeyError:
                        return self.namespace[key]
                else:
                    string.Formatter.get_value(key, args, kwds)

        fmt = NamespaceFormatter({'greeting':'hello'})
        self.assertEqual(fmt.format("{greeting}, world!"), 'hello, world!')

    def test_override_format_field(self):
        class CallFormatter(string.Formatter):
            # Calls the field value instead of formatting it directly.
            def format_field(self, value, format_spec):
                return format(value(), format_spec)

        fmt = CallFormatter()
        self.assertEqual(fmt.format('*{0}*', lambda : 'result'), '*result*')

    def test_override_convert_field(self):
        class XFormatter(string.Formatter):
            # Adds a custom '!x' conversion that maps any value to None.
            def convert_field(self, value, conversion):
                if conversion == 'x':
                    return None
                return super().convert_field(value, conversion)

        fmt = XFormatter()
        self.assertEqual(fmt.format("{0!r}:{0!x}", 'foo', 'foo'), "'foo':None")

    def test_override_parse(self):
        class BarFormatter(string.Formatter):
            # returns an iterable that contains tuples of the form:
            # (literal_text, field_name, format_spec, conversion)
            def parse(self, format_string):
                for field in format_string.split('|'):
                    if field[0] == '+':
                        # it's markup
                        field_name, _, format_spec = field[1:].partition(':')
                        yield '', field_name, format_spec, None
                    else:
                        yield field, None, None, None

        fmt = BarFormatter()
        self.assertEqual(fmt.format('*|+0:^10s|*', 'foo'), '*   foo    *')

    def test_check_unused_args(self):
        class CheckAllUsedFormatter(string.Formatter):
            def check_unused_args(self, used_args, args, kwargs):
                # Track which arguments actually got used
                unused_args = set(kwargs.keys())
                unused_args.update(range(0, len(args)))
                for arg in used_args:
                    unused_args.remove(arg)
                if unused_args:
                    raise ValueError("unused arguments")

        fmt = CheckAllUsedFormatter()
        self.assertEqual(fmt.format("{0}", 10), "10")
        self.assertEqual(fmt.format("{0}{i}", 10, i=100), "10100")
        self.assertEqual(fmt.format("{0}{i}{1}", 10, 20, i=100), "1010020")
        self.assertRaises(ValueError, fmt.format, "{0}{i}{1}", 10, 20, i=100, j=0)
        self.assertRaises(ValueError, fmt.format, "{0}", 10, 20)
        self.assertRaises(ValueError, fmt.format, "{0}", 10, 20, i=100)
        self.assertRaises(ValueError, fmt.format, "{i}", 10, 20, i=100)

    def test_vformat_recursion_limit(self):
        # Exercises the private _vformat depth guard directly with an
        # exhausted recursion budget (-1).
        fmt = string.Formatter()
        args = ()
        kwargs = dict(i=100)
        with self.assertRaises(ValueError) as err:
            fmt._vformat("{i}", args, kwargs, set(), -1)
        self.assertIn("recursion", str(err.exception))
def test_main():
    # Legacy test.support entry point kept for regrtest compatibility.
    support.run_unittest(ModuleTest)

if __name__ == "__main__":
    test_main()
|
mottosso/mindbender-setup | refs/heads/master | bin/windows/python36/Lib/collections/__init__.py | 19 | '''This module implements specialized container datatypes providing
alternatives to Python's general purpose built-in containers, dict,
list, set, and tuple.
* namedtuple factory function for creating tuple subclasses with named fields
* deque list-like container with fast appends and pops on either end
* ChainMap dict-like class for creating a single view of multiple mappings
* Counter dict subclass for counting hashable objects
* OrderedDict dict subclass that remembers the order entries were added
* defaultdict dict subclass that calls a factory function to supply missing values
* UserDict wrapper around dictionary objects for easier dict subclassing
* UserList wrapper around list objects for easier list subclassing
* UserString wrapper around string objects for easier string subclassing
'''
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
# For backwards compatibility, continue to make the collections ABCs
# available through the collections module.
from _collections_abc import *
import _collections_abc
__all__ += _collections_abc.__all__
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
try:
from _collections import deque
except ImportError:
pass
else:
MutableSequence.register(deque)
try:
from _collections import defaultdict
except ImportError:
pass
################################################################################
### OrderedDict
################################################################################
class _OrderedDictKeysView(KeysView):
    """Keys view over an OrderedDict that also supports reversed iteration."""

    def __reversed__(self):
        # Walk the underlying mapping's keys from last-inserted to first.
        for key in reversed(self._mapping):
            yield key
class _OrderedDictItemsView(ItemsView):
    """Items view over an OrderedDict that also supports reversed iteration."""

    def __reversed__(self):
        # Yield (key, value) pairs from last-inserted key to first.
        mapping = self._mapping
        for key in reversed(mapping):
            yield (key, mapping[key])
class _OrderedDictValuesView(ValuesView):
    """Values view over an OrderedDict that also supports reversed iteration."""

    def __reversed__(self):
        # Yield values in reverse key-insertion order.
        mapping = self._mapping
        for key in reversed(mapping):
            yield mapping[key]
class _Link(object):
    'Node of the doubly linked list that records key insertion order.'
    # __weakref__ slot is required so the root sentinel can be wrapped in a
    # weakref proxy (see OrderedDict.__root).
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.

    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.

    def __init__(*args, **kwds):
        '''Initialize an ordered dictionary. The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        # Self is taken positionally from *args so that a key literally named
        # "self" can still be supplied via **kwds.
        if not args:
            raise TypeError("descriptor '__init__' of 'OrderedDict' object "
                            "needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the sentinel and the key->link map.
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
        # Clear the removed link's pointers to help garbage collection.
        link.prev = None
        link.next = None

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).
        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).
        '''
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        # Keep the existing weakref proxy so no new proxy must be created.
        soft_link = link_next.prev
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            root.prev = soft_link
            last.next = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            first.prev = soft_link
            root.next = link

    def __sizeof__(self):
        # Estimate total size: instance dict, key->link map (and inherited
        # dict), all link objects, and their weakref proxies.
        sizeof = _sys.getsizeof
        n = len(self) + 1                       # number of links including root
        size = sizeof(self.__dict__)            # instance dictionary
        size += sizeof(self.__map) * 2          # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n     # link objects
        size += sizeof(self.__root) * n         # proxy objects
        return size

    update = __update = MutableMapping.update

    def keys(self):
        "D.keys() -> a set-like object providing a view on D's keys"
        return _OrderedDictKeysView(self)

    def items(self):
        "D.items() -> a set-like object providing a view on D's items"
        return _OrderedDictItemsView(self)

    def values(self):
        "D.values() -> an object providing a view on D's values"
        return _OrderedDictValuesView(self)

    __ne__ = MutableMapping.__ne__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    @_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))

    def __reduce__(self):
        'Return state information for pickling'
        # Drop the private bookkeeping attributes; order is restored from the
        # items iterator in the 5th element of the reduce tuple.
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        return self.__class__, (), inst_dict or None, None, iter(self.items())

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return dict.__eq__(self, other) and all(map(_eq, self, other))
        return dict.__eq__(self, other)
try:
from _collections import OrderedDict
except ImportError:
# Leave the pure Python version in place.
pass
################################################################################
### namedtuple
################################################################################
_class_template = """\
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return OrderedDict(zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
{field_defs}
"""
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, *, verbose=False, rename=False, module=None):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Validate the field names.  At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, str):
        # Field names may be a single comma- and/or space-separated string.
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    typename = str(typename)
    if rename:
        seen = set()
        for index, name in enumerate(field_names):
            # Invalid or duplicate names are silently replaced with
            # positional names (_0, _1, ...) when rename=True.
            if (not name.isidentifier()
                or _iskeyword(name)
                or name.startswith('_')
                or name in seen):
                field_names[index] = '_%d' % index
            seen.add(name)
    for name in [typename] + field_names:
        if type(name) is not str:
            raise TypeError('Type names and field names must be strings')
        if not name.isidentifier():
            raise ValueError('Type names and field names must be valid '
                             'identifiers: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             'keyword: %r' % name)
    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             '%r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)

    # Fill-in the class template
    class_definition = _class_template.format(
        typename = typename,
        field_names = tuple(field_names),
        num_fields = len(field_names),
        # repr() of the tuple with quotes stripped yields "a, b, c" for use
        # as a generated-function argument list.
        arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
        repr_fmt = ', '.join(_repr_template.format(name=name)
                             for name in field_names),
        field_defs = '\n'.join(_field_template.format(index=index, name=name)
                               for index, name in enumerate(field_names))
    )

    # Execute the template string in a temporary namespace and support
    # tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(__name__='namedtuple_%s' % typename)
    exec(class_definition, namespace)
    result = namespace[typename]
    result._source = class_definition
    if verbose:
        print(result._source)

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython), or where the user has
    # specified a particular module.
    if module is None:
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        result.__module__ = module

    return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try:  # Load C helper function if available
    from _collections import _count_elements
except ImportError:
    # Fall back to the pure Python tally loop defined above.
    pass
class Counter(dict):
    '''Dict subclass for counting hashable items.  Sometimes called a bag
    or multiset.  Elements are stored as dictionary keys and their counts
    are stored as dictionary values.

    >>> c = Counter('abcdeabcdabcaba')  # count elements from a string

    >>> c.most_common(3)                # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c)                       # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements()))   # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values())                 # total of all counts
    15

    >>> c['a']                          # count of letter 'a'
    5
    >>> for elem in 'shazam':           # update counts from an iterable
    ...     c[elem] += 1                # by adding 1 to each element's count
    >>> c['a']                          # now there are seven 'a'
    7
    >>> del c['b']                      # remove all 'b'
    >>> c['b']                          # now there are zero 'b'
    0

    >>> d = Counter('simsalabim')       # make another counter
    >>> c.update(d)                     # add in the second counter
    >>> c['a']                          # now there are nine 'a'
    9

    >>> c.clear()                       # empty the counter
    >>> c
    Counter()

    Note:  If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:

    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2                     # reduce the count of 'b' by two
    >>> c.most_common()                 # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]

    '''
    # References:
    #   http://en.wikipedia.org/wiki/Multiset
    #   http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    #   http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    #   http://code.activestate.com/recipes/259174/
    #   Knuth, TAOCP Vol. II section 4.6.3

    def __init__(*args, **kwds):
        '''Create a new, empty Counter object.  And if given, count elements
        from an input iterable.  Or, initialize the count from another mapping
        of elements to their counts.

        >>> c = Counter()                           # a new, empty counter
        >>> c = Counter('gallahad')                 # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2})           # a new counter from a mapping
        >>> c = Counter(a=4, b=2)                   # a new counter from keyword args

        '''
        # 'self' is peeled out of *args so that an element literally named
        # 'self' can still be counted via keyword argument.
        if not args:
            raise TypeError("descriptor '__init__' of 'Counter' object "
                            "needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        super(Counter, self).__init__()
        self.update(*args, **kwds)

    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0

    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least.  If n is None, then list all element counts.

        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]

        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        # heapq.nlargest is O(len(self) * log n) -- cheaper than a full sort
        # when only the top n entries are wanted.
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))

    def elements(self):
        '''Iterator over elements repeating each as many times as its count.

        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']

        # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements():     # loop over factors
        ...     product *= factor                       # and multiply them
        >>> product
        1836

        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.

        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return _chain.from_iterable(_starmap(_repeat, self.items()))

    # Override dict methods where necessary

    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined.  Use Counter(iterable) instead.')

    def update(*args, **kwds):
        '''Like dict.update() but add counts instead of replacing them.

        Source can be an iterable, a dictionary, or another Counter instance.

        >>> c = Counter('which')
        >>> c.update('witch')           # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d)                 # add elements from another counter
        >>> c['h']                      # four 'h' in which, witch, and watch
        4

        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in the some of original untouched counts
        # being mixed-in with all of the other counts for a mismash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts.  Instead, we implement straight-addition.  Both the inputs
        # and outputs are allowed to contain zero and negative counts.

        if not args:
            raise TypeError("descriptor 'update' of 'Counter' object "
                            "needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        iterable = args[0] if args else None
        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.items():
                        self[elem] = count + self_get(elem, 0)
                else:
                    super(Counter, self).update(iterable) # fast path when counter is empty
            else:
                _count_elements(self, iterable)
        if kwds:
            self.update(kwds)

    def subtract(*args, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero.  Both the inputs and outputs are
        allowed to contain zero and negative counts.

        Source can be an iterable, a dictionary, or another Counter instance.

        >>> c = Counter('which')
        >>> c.subtract('witch')             # subtract elements from another iterable
        >>> c.subtract(Counter('watch'))    # subtract elements from another counter
        >>> c['h']                          # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w']                          # 1 in which, minus 1 in witch, minus 1 in watch
        -1

        '''
        if not args:
            raise TypeError("descriptor 'subtract' of 'Counter' object "
                            "needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        iterable = args[0] if args else None
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)

    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)

    def __reduce__(self):
        # Pickle as (class, (plain-dict,)) so subclasses round-trip too.
        return self.__class__, (dict(self),)

    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super().__delitem__(elem)

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        try:
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)
        except TypeError:
            # handle case where values are not orderable
            return '{0}({1!r})'.format(self.__class__.__name__, dict(self))

    # Multiset-style mathematical operations discussed in:
    #       Knuth TAOCP Volume II section 4.6.3 exercise 19
    #       and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    #       c += Counter()

    def __add__(self, other):
        '''Add counts from two counters.

        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Elements only present in 'other' still contribute to the sum.
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.

        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        # A negative count in 'other' adds a positive count to the result.
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result

    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.

        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.

        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})

        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result

    def __pos__(self):
        'Adds an empty counter, effectively stripping negative and zero counts'
        result = Counter()
        for elem, count in self.items():
            if count > 0:
                result[elem] = count
        return result

    def __neg__(self):
        '''Subtracts from an empty counter.  Strips positive and zero counts,
        and flips the sign on negative counts.

        '''
        result = Counter()
        for elem, count in self.items():
            if count < 0:
                result[elem] = 0 - count
        return result

    def _keep_positive(self):
        '''Internal method to strip elements with a negative or zero count'''
        # Collect first, then delete: the dict must not change size while
        # it is being iterated.
        nonpositive = [elem for elem, count in self.items() if not count > 0]
        for elem in nonpositive:
            del self[elem]
        return self

    def __iadd__(self, other):
        '''Inplace add from another counter, keeping only positive counts.

        >>> c = Counter('abbb')
        >>> c += Counter('bcc')
        >>> c
        Counter({'b': 4, 'c': 2, 'a': 1})

        '''
        for elem, count in other.items():
            self[elem] += count
        return self._keep_positive()

    def __isub__(self, other):
        '''Inplace subtract counter, but keep only results with positive counts.

        >>> c = Counter('abbbc')
        >>> c -= Counter('bccd')
        >>> c
        Counter({'b': 2, 'a': 1})

        '''
        for elem, count in other.items():
            self[elem] -= count
        return self._keep_positive()

    def __ior__(self, other):
        '''Inplace union is the maximum of value from either counter.

        >>> c = Counter('abbb')
        >>> c |= Counter('bcc')
        >>> c
        Counter({'b': 3, 'c': 2, 'a': 1})

        '''
        for elem, other_count in other.items():
            count = self[elem]
            if other_count > count:
                self[elem] = other_count
        return self._keep_positive()

    def __iand__(self, other):
        '''Inplace intersection is the minimum of corresponding counts.

        >>> c = Counter('abbb')
        >>> c &= Counter('bcc')
        >>> c
        Counter({'b': 1})

        '''
        for elem, count in self.items():
            other_count = other[elem]
            if other_count < count:
                self[elem] = other_count
        return self._keep_positive()
########################################################################
### ChainMap
########################################################################
class ChainMap(MutableMapping):
    '''A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.

    The underlying mappings are stored in a list.  That list is public and can
    be accessed or updated using the *maps* attribute.  There is no other
    state.

    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.

    '''

    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.

        '''
        self.maps = list(maps) or [{}]          # always at least one map

    def __missing__(self, key):
        raise KeyError(key)

    def __getitem__(self, key):
        # Scan layers front-to-back; the first layer holding the key wins.
        for layer in self.maps:
            try:
                # Index rather than test membership: a 'key in layer' check
                # would defeat defaultdict-style __missing__ handling.
                return layer[key]
            except KeyError:
                pass
        return self.__missing__(key)            # support subclasses that define __missing__

    def get(self, key, default=None):
        if key in self:
            return self[key]
        return default

    def __len__(self):
        # Collect the distinct keys across all layers; reuses stored hash
        # values where possible.
        keys = set()
        keys.update(*self.maps)
        return len(keys)

    def __iter__(self):
        keys = set()
        keys.update(*self.maps)
        return iter(keys)

    def __contains__(self, key):
        for layer in self.maps:
            if key in layer:
                return True
        return False

    def __bool__(self):
        return any(self.maps)

    @_recursive_repr()
    def __repr__(self):
        inner = ', '.join(repr(layer) for layer in self.maps)
        return '{0.__class__.__name__}({1})'.format(self, inner)

    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        first, *rest = self.maps
        return self.__class__(first.copy(), *rest)

    __copy__ = copy

    def new_child(self, m=None):                # like Django's Context.push()
        '''New ChainMap with a new map followed by all previous maps.
        If no map is provided, an empty dict is used.

        '''
        return self.__class__({} if m is None else m, *self.maps)

    @property
    def parents(self):                          # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    # A dict work-alike that stores its contents in the public 'data'
    # attribute, making it easy to subclass and override behavior.

    # Start by filling-out the abstract methods
    def __init__(*args, **kwargs):
        # 'self' is peeled out of *args so that a key literally named
        # 'self' can still be supplied as a keyword argument.
        if not args:
            raise TypeError("descriptor '__init__' of 'UserDict' object "
                            "needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            # Legacy spelling: UserDict(dict=...) still works but warns.
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is deprecated",
                          DeprecationWarning, stacklevel=2)
        else:
            dict = None
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __len__(self): return len(self.data)
    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Defer to a subclass-provided __missing__ hook, mirroring dict.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def __iter__(self):
        return iter(self.data)

    # Modify __contains__ to work correctly when __missing__ is present
    def __contains__(self, key):
        return key in self.data

    # Now, add the methods in dicts but not in MutableMapping
    def __repr__(self): return repr(self.data)
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        # For subclasses: copy the instance with an empty 'data' first so
        # copy.copy() does not duplicate the contents, then refill via update.
        import copy
        data = self.data
        try:
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    @classmethod
    def fromkeys(cls, iterable, value=None):
        # Build a new instance mapping every key in 'iterable' to 'value'.
        d = cls()
        for key in iterable:
            d[key] = value
        return d
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
    """A more or less complete user-defined wrapper around list objects."""
    # Contents live in the public 'data' attribute (a real list).

    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)
    def __repr__(self): return repr(self.data)
    # Rich comparisons delegate to the underlying list; __cast unwraps
    # UserList operands so two wrappers compare by content.
    def __lt__(self, other): return self.data < self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __gt__(self, other): return self.data > self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cast(self, other):
        return other.data if isinstance(other, UserList) else other
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, i): return self.data[i]
    def __setitem__(self, i, item): self.data[i] = item
    def __delitem__(self, i): del self.data[i]
    def __add__(self, other):
        # Concatenation always returns a new instance of the (sub)class.
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        return self.__class__(self.data + list(other))
    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        return self.__class__(list(other) + self.data)
    def __iadd__(self, other):
        # In-place concatenation mutates 'data' and returns self.
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        return self
    # Remaining list API delegates directly to the wrapped list.
    def append(self, item): self.data.append(item)
    def insert(self, i, item): self.data.insert(i, item)
    def pop(self, i=-1): return self.data.pop(i)
    def remove(self, item): self.data.remove(item)
    def clear(self): self.data.clear()
    def copy(self): return self.__class__(self)
    def count(self, item): return self.data.count(item)
    def index(self, item, *args): return self.data.index(item, *args)
    def reverse(self): self.data.reverse()
    def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
    """A str work-alike that stores its contents in the public 'data'
    attribute, making it easy to subclass and override string behavior.
    """

    def __init__(self, seq):
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)
    def __getnewargs__(self):
        return (self.data[:],)

    # Comparisons unwrap UserString operands and delegate to str.
    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        return self.data == string
    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        return self.data < string
    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        return self.data <= string
    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        return self.data > string
    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        return self.data >= string

    def __contains__(self, char):
        if isinstance(char, UserString):
            char = char.data
        return char in self.data

    def __len__(self): return len(self.data)
    def __getitem__(self, index): return self.__class__(self.data[index])
    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        return self.__class__(self.data % args)
    def __rmod__(self, format):
        # BUGFIX: the original body referenced an undefined name 'args',
        # raising NameError whenever __rmod__ was invoked (CPython bpo-25652).
        # Format the left operand with this instance as the substitution value.
        return self.__class__(str(format) % self)

    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def casefold(self):
        return self.__class__(self.data.casefold())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.count(sub, start, end)
    def encode(self, encoding=None, errors=None): # XXX improve this?
        # NOTE(review): wrapping the encoded bytes back into UserString
        # stringifies them ("b'...'"); later CPython returns bytes here.
        # Preserved as-is for backward compatibility.
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            return self.__class__(self.data.encode(encoding))
        return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=_sys.maxsize):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.find(sub, start, end)
    def format(self, *args, **kwds):
        return self.data.format(*args, **kwds)
    def format_map(self, mapping):
        return self.data.format_map(mapping)
    def index(self, sub, start=0, end=_sys.maxsize):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def isidentifier(self): return self.data.isidentifier()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isprintable(self): return self.data.isprintable()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    maketrans = str.maketrans
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        if isinstance(old, UserString):
            old = old.data
        if isinstance(new, UserString):
            new = new.data
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=_sys.maxsize):
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=False): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=_sys.maxsize):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
|
farodin91/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/testing/test_tmpdir.py | 173 | import sys
import py
import pytest
from _pytest.tmpdir import tmpdir
def test_funcarg(testdir):
    # The tmpdir fixture derives the directory basename from the test item's
    # name; path-hostile characters (slashes/backslashes) must be replaced
    # with underscores.
    testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.addcall(id='a')
                metafunc.addcall(id='b')
            def test_func(tmpdir): pass
    """)
    from _pytest.tmpdir import TempdirFactory
    reprec = testdir.inline_run()
    calls = reprec.getcalls("pytest_runtest_setup")
    item = calls[0].item
    config = item.config
    tmpdirhandler = TempdirFactory(config)
    item._initrequest()
    p = tmpdir(item._request, tmpdirhandler)
    assert p.check()
    # Strip the numeric suffix appended to numbered directories.
    bn = p.basename.strip("0123456789")
    assert bn.endswith("test_func_a_")
    item.name = "qwe/\\abc"
    p = tmpdir(item._request, tmpdirhandler)
    assert p.check()
    bn = p.basename.strip("0123456789")
    assert bn == "qwe__abc"
def test_ensuretemp(recwarn):
    #pytest.deprecated_call(pytest.ensuretemp, 'hello')
    # ensuretemp() must be idempotent: repeated calls with the same name
    # return the same existing directory.
    d1 = pytest.ensuretemp('hello')
    d2 = pytest.ensuretemp('hello')
    assert d1 == d2
    assert d1.check(dir=1)
class TestTempdirHandler:
    def test_mktemp(self, testdir):
        # mktemp() appends an incrementing numeric suffix so that repeated
        # requests for the same name never reuse a directory.
        from _pytest.tmpdir import TempdirFactory
        config = testdir.parseconfig()
        config.option.basetemp = testdir.mkdir("hello")
        t = TempdirFactory(config)
        tmp = t.mktemp("world")
        assert tmp.relto(t.getbasetemp()) == "world0"
        tmp = t.mktemp("this")
        assert tmp.relto(t.getbasetemp()).startswith("this")
        tmp2 = t.mktemp("this")
        assert tmp2.relto(t.getbasetemp()).startswith("this")
        assert tmp2 != tmp
class TestConfigTmpdir:
    def test_getbasetemp_custom_removes_old(self, testdir):
        # A user-supplied --basetemp directory must be wiped between runs:
        # content created after the first run disappears after the second.
        mytemp = testdir.tmpdir.join("xyz")
        p = testdir.makepyfile("""
            def test_1(tmpdir):
                pass
        """)
        testdir.runpytest(p, '--basetemp=%s' % mytemp)
        mytemp.check()
        mytemp.ensure("hello")
        testdir.runpytest(p, '--basetemp=%s' % mytemp)
        mytemp.check()
        assert not mytemp.join("hello").check()
def test_basetemp(testdir):
    # ensuretemp() directories must be created under the configured
    # --basetemp root.
    mytemp = testdir.tmpdir.mkdir("mytemp")
    p = testdir.makepyfile("""
        import pytest
        def test_1():
            pytest.ensuretemp("hello")
    """)
    result = testdir.runpytest(p, '--basetemp=%s' % mytemp)
    assert result.ret == 0
    assert mytemp.join('hello').check()
@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
                    reason="symlink not available on this platform")
def test_tmpdir_always_is_realpath(testdir):
    # the reason why tmpdir should be a realpath is that
    # when you cd to it and do "os.getcwd()" you will anyway
    # get the realpath.  Using the symlinked path can thus
    # easily result in path-inequality
    # XXX if that proves to be a problem, consider using
    # os.environ["PWD"]
    realtemp = testdir.tmpdir.mkdir("myrealtemp")
    linktemp = testdir.tmpdir.join("symlinktemp")
    linktemp.mksymlinkto(realtemp)
    p = testdir.makepyfile("""
        def test_1(tmpdir):
            import os
            assert os.path.realpath(str(tmpdir)) == str(tmpdir)
    """)
    result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp)
    assert not result.ret
def test_tmpdir_too_long_on_parametrization(testdir):
    # A very long parametrized test id (1000 chars) must be truncated when
    # used as a directory name so the path stays within filesystem limits.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.parametrize("arg", ["1"*1000])
        def test_some(arg, tmpdir):
            tmpdir.ensure("hello")
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
def test_tmpdir_factory(testdir):
    # The session-scoped tmpdir_factory fixture supports creating
    # non-numbered directories via mktemp(..., numbered=False).
    testdir.makepyfile("""
        import pytest
        @pytest.fixture(scope='session')
        def session_dir(tmpdir_factory):
            return tmpdir_factory.mktemp('data', numbered=False)
        def test_some(session_dir):
            session_dir.isdir()
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
def test_tmpdir_fallback_tox_env(testdir, monkeypatch):
    """Test that tmpdir works even if environment variables required by getpass
    module are missing (#1010).
    """
    # Simulate a stripped environment (e.g. under tox) with no user info.
    monkeypatch.delenv('USER', raising=False)
    monkeypatch.delenv('USERNAME', raising=False)
    testdir.makepyfile("""
        import pytest
        def test_some(tmpdir):
            assert tmpdir.isdir()
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
@pytest.fixture
def break_getuser(monkeypatch):
    """Simulate an environment in which the current user cannot be determined
    (invalid uid and no user-related environment variables)."""
    monkeypatch.setattr('os.getuid', lambda: -1)
    # taken from python 2.7/3.4
    for envvar in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
        monkeypatch.delenv(envvar, raising=False)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows')
def test_tmpdir_fallback_uid_not_found(testdir):
    """Test that tmpdir works even if the current process's user id does not
    correspond to a valid user.
    """
    testdir.makepyfile("""
        import pytest
        def test_some(tmpdir):
            assert tmpdir.isdir()
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows')
def test_get_user_uid_not_found():
    """Test that get_user() function works even if the current process's
    user id does not correspond to a valid user (e.g. running pytest in a
    Docker container with 'docker run -u'.
    """
    from _pytest.tmpdir import get_user
    assert get_user() is None
@pytest.mark.skipif(not sys.platform.startswith('win'), reason='win only')
def test_get_user(monkeypatch):
    """Test that get_user() function works even if environment variables
    required by getpass module are missing from the environment on Windows
    (#1010).
    """
    from _pytest.tmpdir import get_user
    monkeypatch.delenv('USER', raising=False)
    monkeypatch.delenv('USERNAME', raising=False)
    assert get_user() is None
|
da1z/intellij-community | refs/heads/master | python/testData/intentions/PyConvertFormatOperatorToMethodIntentionTest/simple_after.py | 33 | print("|\077{:>20}FOO! {: 5.3F}%\n|{:<15} {:<10d}-torn-off, {:+#05d}".format("right-->", 12, "<--left", -10, 20)) |
funtoo/portage-funtoo | refs/heads/2013-06-07 | pym/portage/tests/resolver/test_depth.py | 16 | # Copyright 2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
ResolverPlaygroundTestCase)
class ResolverDepthTestCase(TestCase):
    """Exercise the dependency resolver's --deep / --update / --nodeps /
    --emptytree depth handling against a small synthetic ebuild tree."""

    def testResolverDepth(self):
        """Run a table of ResolverPlaygroundTestCase scenarios covering how
        dependency traversal depth interacts with virtuals, slots and the
        various emerge option combinations.
        """
        # Synthetic repository: A -> B -> C chains plus java/libusb virtuals.
        ebuilds = {
            "dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
            "dev-libs/A-2": {"RDEPEND" : "dev-libs/B"},
            "dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
            "dev-libs/B-2": {"RDEPEND" : "dev-libs/C"},
            "dev-libs/C-1": {},
            "dev-libs/C-2": {},
            "virtual/libusb-0" : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
            "virtual/libusb-1" : {"EAPI" :"2", "SLOT" : "1", "RDEPEND" : ">=dev-libs/libusb-1.0.4:1"},
            "dev-libs/libusb-0.1.13" : {},
            "dev-libs/libusb-1.0.5" : {"SLOT":"1"},
            "dev-libs/libusb-compat-1" : {},
            "sys-freebsd/freebsd-lib-8": {"IUSE" : "+usb"},
            "sys-fs/udev-164" : {"EAPI" : "1", "RDEPEND" : "virtual/libusb:0"},
            "virtual/jre-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
            "virtual/jre-1.5.0-r1" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
            "virtual/jre-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
            "virtual/jre-1.6.0-r1" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
            "virtual/jdk-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
            "virtual/jdk-1.5.0-r1" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
            "virtual/jdk-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
            "virtual/jdk-1.6.0-r1" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
            "dev-java/gcj-jdk-4.5" : {},
            "dev-java/gcj-jdk-4.5-r1" : {},
            "dev-java/icedtea-6.1" : {},
            "dev-java/icedtea-6.1-r1" : {},
            "dev-java/sun-jdk-1.5" : {"SLOT" : "1.5"},
            "dev-java/sun-jdk-1.6" : {"SLOT" : "1.6"},
            "dev-java/sun-jre-bin-1.5" : {"SLOT" : "1.5"},
            "dev-java/sun-jre-bin-1.6" : {"SLOT" : "1.6"},
            "dev-java/ant-core-1.8" : {"DEPEND" : ">=virtual/jdk-1.4"},
            "dev-db/hsqldb-1.8" : {"RDEPEND" : ">=virtual/jre-1.6"},
        }

        # Installed set: older versions of the A/B/C chain and the java
        # virtuals, so updates become available at varying depths.
        installed = {
            "dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
            "dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
            "dev-libs/C-1": {},
            "virtual/jre-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =virtual/jdk-1.5.0* =dev-java/sun-jre-bin-1.5.0* )"},
            "virtual/jre-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =virtual/jdk-1.6.0* =dev-java/sun-jre-bin-1.6.0* )"},
            "virtual/jdk-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
            "virtual/jdk-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
            "dev-java/gcj-jdk-4.5" : {},
            "dev-java/icedtea-6.1" : {},
            "virtual/libusb-0" : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
        }

        world = ["dev-libs/A"]

        # Each case pins the expected merge list for one option combination.
        test_cases = (
            ResolverPlaygroundTestCase(
                ["dev-libs/A"],
                options = {"--update": True, "--deep": 0},
                success = True,
                mergelist = ["dev-libs/A-2"]),

            ResolverPlaygroundTestCase(
                ["dev-libs/A"],
                options = {"--update": True, "--deep": 1},
                success = True,
                mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),

            ResolverPlaygroundTestCase(
                ["dev-libs/A"],
                options = {"--update": True, "--deep": 2},
                success = True,
                mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),

            ResolverPlaygroundTestCase(
                ["@world"],
                options = {"--update": True, "--deep": True},
                success = True,
                mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),

            ResolverPlaygroundTestCase(
                ["@world"],
                options = {"--emptytree": True},
                success = True,
                mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),

            ResolverPlaygroundTestCase(
                ["@world"],
                options = {"--selective": True, "--deep": True},
                success = True,
                mergelist = []),

            ResolverPlaygroundTestCase(
                ["dev-libs/A"],
                options = {"--deep": 2},
                success = True,
                mergelist = ["dev-libs/A-2"]),

            ResolverPlaygroundTestCase(
                ["virtual/jre"],
                options = {},
                success = True,
                mergelist = ['virtual/jre-1.6.0-r1']),

            ResolverPlaygroundTestCase(
                ["virtual/jre"],
                options = {"--deep" : True},
                success = True,
                mergelist = ['virtual/jre-1.6.0-r1']),

            # Test bug #141118, where we avoid pulling in
            # redundant deps, satisfying nested virtuals
            # as efficiently as possible.
            ResolverPlaygroundTestCase(
                ["virtual/jre"],
                options = {"--selective" : True, "--deep" : True},
                success = True,
                mergelist = []),

            # Test bug #150361, where depgraph._greedy_slots()
            # is triggered by --update with AtomArg.
            ResolverPlaygroundTestCase(
                ["virtual/jre"],
                options = {"--update" : True},
                success = True,
                ambiguous_merge_order = True,
                mergelist = [('virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),

            # Recursively traversed virtual dependencies, and their
            # direct dependencies, are considered to have the same
            # depth as direct dependencies.
            ResolverPlaygroundTestCase(
                ["virtual/jre"],
                options = {"--update" : True, "--deep" : 1},
                success = True,
                ambiguous_merge_order = True,
                merge_order_assertions=(('dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1'), ('virtual/jdk-1.6.0-r1', 'virtual/jre-1.6.0-r1'),
                    ('dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.5.0-r1'), ('virtual/jdk-1.5.0-r1', 'virtual/jre-1.5.0-r1')),
                mergelist = [('dev-java/icedtea-6.1-r1', 'dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.6.0-r1', 'virtual/jdk-1.5.0-r1', 'virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),

            ResolverPlaygroundTestCase(
                ["virtual/jre:1.5"],
                options = {"--update" : True},
                success = True,
                mergelist = ['virtual/jre-1.5.0-r1']),

            ResolverPlaygroundTestCase(
                ["virtual/jre:1.6"],
                options = {"--update" : True},
                success = True,
                mergelist = ['virtual/jre-1.6.0-r1']),

            # Test that we don't pull in any unnecessary updates
            # when --update is not specified, even though we
            # specified --deep.
            ResolverPlaygroundTestCase(
                ["dev-java/ant-core"],
                options = {"--deep" : True},
                success = True,
                mergelist = ["dev-java/ant-core-1.8"]),

            ResolverPlaygroundTestCase(
                ["dev-java/ant-core"],
                options = {"--update" : True},
                success = True,
                mergelist = ["dev-java/ant-core-1.8"]),

            # Recursively traversed virtual dependencies, and their
            # direct dependencies, are considered to have the same
            # depth as direct dependencies.
            ResolverPlaygroundTestCase(
                ["dev-java/ant-core"],
                options = {"--update" : True, "--deep" : 1},
                success = True,
                mergelist = ['dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1', 'dev-java/ant-core-1.8']),

            ResolverPlaygroundTestCase(
                ["dev-db/hsqldb"],
                options = {"--deep" : True},
                success = True,
                mergelist = ["dev-db/hsqldb-1.8"]),

            # Don't traverse deps of an installed package with --deep=0,
            # even if it's a virtual.
            ResolverPlaygroundTestCase(
                ["virtual/libusb:0"],
                options = {"--selective" : True, "--deep" : 0},
                success = True,
                mergelist = []),

            # Satisfy unsatisfied dep of installed package with --deep=1.
            ResolverPlaygroundTestCase(
                ["virtual/libusb:0"],
                options = {"--selective" : True, "--deep" : 1},
                success = True,
                mergelist = ['dev-libs/libusb-0.1.13']),

            # Pull in direct dep of virtual, even with --deep=0.
            ResolverPlaygroundTestCase(
                ["sys-fs/udev"],
                options = {"--deep" : 0},
                success = True,
                mergelist = ['dev-libs/libusb-0.1.13', 'sys-fs/udev-164']),

            # Test --nodeps with direct virtual deps.
            ResolverPlaygroundTestCase(
                ["sys-fs/udev"],
                options = {"--nodeps" : True},
                success = True,
                mergelist = ["sys-fs/udev-164"]),

            # Test that --nodeps overrides --deep.
            ResolverPlaygroundTestCase(
                ["sys-fs/udev"],
                options = {"--nodeps" : True, "--deep" : True},
                success = True,
                mergelist = ["sys-fs/udev-164"]),

            # Test that --nodeps overrides --emptytree.
            ResolverPlaygroundTestCase(
                ["sys-fs/udev"],
                options = {"--nodeps" : True, "--emptytree" : True},
                success = True,
                mergelist = ["sys-fs/udev-164"]),

            # Test --emptytree with virtuals.
            ResolverPlaygroundTestCase(
                ["sys-fs/udev"],
                options = {"--emptytree" : True},
                success = True,
                mergelist = ['dev-libs/libusb-0.1.13', 'virtual/libusb-0', 'sys-fs/udev-164']),
        )

        playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
            world=world)
        try:
            for test_case in test_cases:
                playground.run_TestCase(test_case)
                self.assertEqual(test_case.test_success, True, test_case.fail_msg)
        finally:
            # Always remove the temporary playground tree.
            playground.cleanup()
|
iradul/phantomjs-clone | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/watchlist_mock.py | 130 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
_log = logging.getLogger(__name__)
class MockWatchList(object):
    """Test double for the watch list: records that it was invoked and
    returns canned cc/message data instead of inspecting the diff."""

    def determine_cc_and_messages(self, diff):
        """Return fixed cc addresses and messages, ignoring *diff*."""
        _log.info("MockWatchList: determine_cc_and_messages")
        cc_list = ['abarth@webkit.org', 'eric@webkit.org', 'levin@chromium.org']
        messages = ['Message1.', 'Message2.']
        return {'cc_list': cc_list, 'messages': messages}
|
Nzaga/home-assistant | refs/heads/master | homeassistant/components/notify/demo.py | 28 | """
homeassistant.components.notify.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demo notification service.
"""
from homeassistant.components.notify import ATTR_TITLE, BaseNotificationService
EVENT_NOTIFY = "notify"
def get_service(hass, config):
    """Return a demo notification service bound to *hass*.

    The *config* mapping is accepted for interface compatibility but unused.
    """
    service = DemoNotificationService(hass)
    return service
# pylint: disable=too-few-public-methods
class DemoNotificationService(BaseNotificationService):
    """Demo notification service: instead of delivering anything, it fires
    an EVENT_NOTIFY event on the Home Assistant bus."""

    def __init__(self, hass):
        self.hass = hass

    def send_message(self, message="", **kwargs):
        """Fire a notify event carrying the optional title and the message."""
        payload = {"title": kwargs.get(ATTR_TITLE), "message": message}
        self.hass.bus.fire(EVENT_NOTIFY, payload)
|
DJMelonz/basic-blog | refs/heads/master | django/views/decorators/vary.py | 307 | try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
    """
    A view decorator that adds the specified headers to the Vary header of
    the response. Usage:

        @vary_on_headers('Cookie', 'Accept-language')
        def index(request):
            ...

    Note that the header names are not case-sensitive.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def wrapped(*args, **kwargs):
            resp = view_func(*args, **kwargs)
            patch_vary_headers(resp, headers)
            return resp
        return wrapped
    return decorator
def vary_on_cookie(func):
    """
    A view decorator that adds "Cookie" to the Vary header of a response,
    indicating that a page's contents depends on cookies. Usage:

        @vary_on_cookie
        def index(request):
            ...
    """
    @wraps(func, assigned=available_attrs(func))
    def wrapped(*args, **kwargs):
        resp = func(*args, **kwargs)
        patch_vary_headers(resp, ('Cookie',))
        return resp
    return wrapped
|
Oliver2213/NVDAYoutube-dl | refs/heads/master | addon/globalPlugins/nvdaYoutubeDL/lib/xml/dom/pulldom.py | 322 | import xml.sax
import xml.sax.handler
import types
# On Python builds where the types module exposes UnicodeType, both str and
# unicode count as string types; otherwise fall back to str alone.
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
    _StringTypes = [types.StringType]

# Event-type markers: each event produced by this module is a
# (event_type, node) pair using one of these constants.
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
    """SAX ContentHandler that turns SAX callbacks into a queue of
    (event_type, node) pairs consumed by DOMEventStream.

    Events are chained as a singly linked list of two-item lists,
    each link being [event, next_link]: self.firstEvent is the head
    sentinel and self.lastEvent always points at the tail link.
    """
    _locator = None
    document = None

    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # Head sentinel and tail pointer of the event linked list.
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self.elementStack = []
        # Bind list methods directly for speed; push/pop track open elements.
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Events seen before the document node exists (e.g. comments and
        # processing instructions in the prolog); replayed in buildDocument().
        self.pending_events = []

    def pop(self):
        # Class-level fallback, used only if binding the list's pop method
        # in __init__ failed.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result

    def setDocumentLocator(self, locator):
        self._locator = locator

    def startPrefixMapping(self, prefix, uri):
        # Collect xmlns declarations so startElementNS can re-attach them
        # as attributes, and push a new uri -> prefix context.
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()

    def startElementNS(self, name, tagName, attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                # First element: create the document node as a side effect.
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)

        # Copy SAX attributes onto the DOM element, qualifying names
        # against the current namespace context.
        for aname, value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value

        # Queue a START_ELEMENT event and make this element current.
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)

    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]

    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS.
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)

        for aname, value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)

        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)

    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]

    def comment(self, s):
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document node yet: remember the raw text for replay.
            event = [(COMMENT, s), None]
            self.pending_events.append(event)

    def processingInstruction(self, target, data):
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document node yet: remember the raw target/data for replay.
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)

    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]

    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]

    def startDocument(self):
        # Default to minidom's DOMImplementation if none was supplied.
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation

    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _, target, data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ", e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        # Return the document element created by createDocument above.
        return node.firstChild

    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()

    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
class ErrorHandler:
    """Minimal SAX error handler: print warnings, re-raise errors."""
    def warning(self, exception):
        # Warnings are non-fatal; just report them.
        print exception
    def error(self, exception):
        raise exception
    def fatalError(self, exception):
        raise exception
class DOMEventStream:
    """Iterable stream of (event_type, node) pairs produced by feeding a
    SAX parser through a PullDOM handler.

    If the parser supports incremental feeding (has a ``feed`` method),
    events are pulled on demand; otherwise the whole input is parsed up
    front (see _slurp).
    """

    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        # Non-incremental parsers get the slurp-everything fallback.
        if not hasattr(self.parser, 'feed'):
            self.getEvent = self._slurp
        self.reset()

    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)

    def __getitem__(self, pos):
        # Old-style iteration protocol; ``pos`` is ignored, events are
        # returned in order until exhaustion.
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError

    def next(self):
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration

    def __iter__(self):
        return self

    def expandNode(self, node):
        """Consume events until *node* is closed, appending all of its
        descendants to it so it becomes a fully built subtree."""
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                # Reached the END_ELEMENT of the node being expanded.
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()

    def getEvent(self):
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        # Feed chunks until at least one event is queued or input ends.
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        # Unlink and return the head event.
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc

    def _slurp(self):
        """ Fallback replacement for getEvent() using the
            standard SAX2 interface, which means we slurp the
            SAX events into memory (no performance gain, but
            we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        # After the one-shot parse, subsequent calls just emit queued events.
        self.getEvent = self._emit
        return self._emit()

    def _emit(self):
        """ Fallback replacement for getEvent() that emits
            the events that _slurp() read previously.
        """
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc

    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
class SAX2DOM(PullDOM):
    """PullDOM variant that additionally links every created node into its
    parent, so a complete DOM tree is built as events are processed."""

    def startElementNS(self, name, tagName, attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        # The new element is on top of the stack; its parent is just below.
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)

    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)

    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        # The node just queued by PullDOM is attached to the open element.
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)

    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)

    def characters(self, chars):
        PullDOM.characters(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
# Default chunk size used when reading the input stream incrementally.
default_bufsize = (2 ** 14) - 20

def parse(stream_or_string, parser=None, bufsize=None):
    """Parse a filename or file-like object and return a DOMEventStream.

    If *parser* is omitted, a new parser is created with
    xml.sax.make_parser().
    """
    if bufsize is None:
        bufsize = default_bufsize
    # Accept either a path (opened here) or an already-open stream.
    if type(stream_or_string) in _StringTypes:
        stream = open(stream_or_string)
    else:
        stream = stream_or_string
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
    """Parse an XML document given as a string; return a DOMEventStream."""
    # Prefer the C implementation of StringIO; fall back to pure Python.
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO

    # The whole document fits in one buffer, so one feed() suffices.
    bufsize = len(string)
    buf = StringIO(string)
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(buf, parser, bufsize)
|
ministryofjustice/cla_public | refs/heads/master | cla_public/middleware.py | 1 | import logging
# Configure the root logger with default settings at import time so log
# records emitted by this module are not silently dropped.
logging.basicConfig()
log = logging.getLogger(__name__)
|
Flimm/linkchecker | refs/heads/master | linkcheck/winutil.py | 9 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2010-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Windows utility functions."""
def get_shell_folder(name):
    """Return the Windows Shell Folder location *name* from the registry.

    Reads HKEY_CURRENT_USER\\...\\Explorer\\Shell Folders, working on both
    Python 2 (_winreg) and Python 3 (winreg).
    """
    try:
        import _winreg as winreg
    except ImportError:
        import winreg
    key_path = r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    registry = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
    try:
        key = winreg.OpenKey(registry, key_path)
        try:
            # QueryValueEx returns (value, type); only the value is wanted.
            value, _value_type = winreg.QueryValueEx(key, name)
            return value
        finally:
            key.Close()
    finally:
        registry.Close()
|
density215/d215-miniblog | refs/heads/master | django/contrib/comments/templatetags/comments.py | 309 | from django import template
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib import comments
from django.utils.encoding import smart_unicode
register = template.Library()
class BaseCommentNode(template.Node):
    """
    Base helper class (abstract) for handling the get_comment_* template tags.
    Looks a bit strange, but the subclasses below should make this a bit more
    obvious.
    """

    #@classmethod
    def handle_token(cls, parser, token):
        """Class method to parse get_comment_list/count/form and return a Node."""
        tokens = token.contents.split()
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])

        # {% get_whatever for obj as varname %}
        if len(tokens) == 5:
            if tokens[3] != 'as':
                raise template.TemplateSyntaxError("Third argument in %r must be 'as'" % tokens[0])
            return cls(
                object_expr = parser.compile_filter(tokens[2]),
                as_varname = tokens[4],
            )

        # {% get_whatever for app.model pk as varname %}
        elif len(tokens) == 6:
            if tokens[4] != 'as':
                raise template.TemplateSyntaxError("Fourth argument in %r must be 'as'" % tokens[0])
            return cls(
                ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
                object_pk_expr = parser.compile_filter(tokens[3]),
                as_varname = tokens[5]
            )

        else:
            raise template.TemplateSyntaxError("%r tag requires 4 or 5 arguments" % tokens[0])
    handle_token = classmethod(handle_token)

    #@staticmethod
    def lookup_content_type(token, tagname):
        # Resolve an 'app.model' token to a ContentType, re-raising lookup
        # problems as template syntax errors.
        try:
            app, model = token.split('.')
            return ContentType.objects.get(app_label=app, model=model)
        except ValueError:
            raise template.TemplateSyntaxError("Third argument in %r must be in the format 'app.model'" % tagname)
        except ContentType.DoesNotExist:
            raise template.TemplateSyntaxError("%r tag has non-existant content-type: '%s.%s'" % (tagname, app, model))
    lookup_content_type = staticmethod(lookup_content_type)

    def __init__(self, ctype=None, object_pk_expr=None, object_expr=None, as_varname=None, comment=None):
        # Either a literal object expression or a (ctype, pk) pair must be
        # supplied; both forms identify the comment target.
        if ctype is None and object_expr is None:
            raise template.TemplateSyntaxError("Comment nodes must be given either a literal object or a ctype and object pk.")
        self.comment_model = comments.get_model()
        self.as_varname = as_varname
        self.ctype = ctype
        self.object_pk_expr = object_pk_expr
        self.object_expr = object_expr
        self.comment = comment

    def render(self, context):
        # Store the computed value in the context under the 'as' name;
        # the tag itself renders nothing.
        qs = self.get_query_set(context)
        context[self.as_varname] = self.get_context_value_from_queryset(context, qs)
        return ''

    def get_query_set(self, context):
        ctype, object_pk = self.get_target_ctype_pk(context)
        if not object_pk:
            return self.comment_model.objects.none()

        qs = self.comment_model.objects.filter(
            content_type = ctype,
            object_pk = smart_unicode(object_pk),
            site__pk = settings.SITE_ID,
        )

        # The is_public and is_removed fields are implementation details of the
        # built-in comment model's spam filtering system, so they might not
        # be present on a custom comment model subclass. If they exist, we
        # should filter on them.
        field_names = [f.name for f in self.comment_model._meta.fields]
        if 'is_public' in field_names:
            qs = qs.filter(is_public=True)
        if getattr(settings, 'COMMENTS_HIDE_REMOVED', True) and 'is_removed' in field_names:
            qs = qs.filter(is_removed=False)

        return qs

    def get_target_ctype_pk(self, context):
        # Returns (ContentType, pk) for the target, or (None, None) when a
        # literal object expression fails to resolve.
        if self.object_expr:
            try:
                obj = self.object_expr.resolve(context)
            except template.VariableDoesNotExist:
                return None, None
            return ContentType.objects.get_for_model(obj), obj.pk
        else:
            return self.ctype, self.object_pk_expr.resolve(context, ignore_failures=True)

    def get_context_value_from_queryset(self, context, qs):
        """Subclasses should override this."""
        raise NotImplementedError
class CommentListNode(BaseCommentNode):
    """Insert a list of comments into the context."""
    def get_context_value_from_queryset(self, context, qs):
        # Materialize the queryset so the template receives a plain list.
        return [comment for comment in qs]
class CommentCountNode(BaseCommentNode):
    """Insert a count of comments into the context."""
    def get_context_value_from_queryset(self, context, qs):
        # Let the database do the counting instead of fetching the rows.
        return qs.count()
class CommentFormNode(BaseCommentNode):
    """Insert a form for the comment model into the context."""

    def get_form(self, context):
        """Return an unbound comment form for the target object, or None
        when the target cannot be resolved."""
        ctype, object_pk = self.get_target_ctype_pk(context)
        if not object_pk:
            return None
        target = ctype.get_object_for_this_type(pk=object_pk)
        return comments.get_form()(target)

    def render(self, context):
        context[self.as_varname] = self.get_form(context)
        return ''
class RenderCommentFormNode(CommentFormNode):
    """Render the comment form directly"""

    #@classmethod
    def handle_token(cls, parser, token):
        """Class method to parse render_comment_form and return a Node."""
        tokens = token.contents.split()
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])

        # {% render_comment_form for obj %}
        if len(tokens) == 3:
            return cls(object_expr=parser.compile_filter(tokens[2]))

        # {% render_comment_form for app.models pk %}
        elif len(tokens) == 4:
            return cls(
                ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
                object_pk_expr = parser.compile_filter(tokens[3])
            )

        # Bug fix: previously any other token count fell off the end and
        # returned None, which crashed later with an opaque AttributeError
        # inside the template engine. Fail loudly at parse time instead,
        # matching BaseCommentNode.handle_token.
        else:
            raise template.TemplateSyntaxError("%r tag requires 2 or 3 arguments" % tokens[0])
    handle_token = classmethod(handle_token)

    def render(self, context):
        """Render the form template for the resolved target, or nothing if
        the target could not be resolved."""
        ctype, object_pk = self.get_target_ctype_pk(context)
        if object_pk:
            # Most-specific template wins: per-model, per-app, then global.
            template_search_list = [
                "comments/%s/%s/form.html" % (ctype.app_label, ctype.model),
                "comments/%s/form.html" % ctype.app_label,
                "comments/form.html"
            ]
            # Push/pop so the 'form' variable doesn't leak into the
            # surrounding template context.
            context.push()
            formstr = render_to_string(template_search_list, {"form" : self.get_form(context)}, context)
            context.pop()
            return formstr
        else:
            return ''
class RenderCommentListNode(CommentListNode):
    """Render the comment list directly"""

    #@classmethod
    def handle_token(cls, parser, token):
        """Class method to parse render_comment_list and return a Node."""
        tokens = token.contents.split()
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])

        # {% render_comment_list for obj %}
        if len(tokens) == 3:
            return cls(object_expr=parser.compile_filter(tokens[2]))

        # {% render_comment_list for app.models pk %}
        elif len(tokens) == 4:
            return cls(
                ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
                object_pk_expr = parser.compile_filter(tokens[3])
            )

        # Bug fix: previously any other token count fell off the end and
        # returned None, which crashed later with an opaque AttributeError
        # inside the template engine. Fail loudly at parse time instead,
        # matching BaseCommentNode.handle_token.
        else:
            raise template.TemplateSyntaxError("%r tag requires 2 or 3 arguments" % tokens[0])
    handle_token = classmethod(handle_token)

    def render(self, context):
        """Render the comment list template for the resolved target, or
        nothing if the target could not be resolved."""
        ctype, object_pk = self.get_target_ctype_pk(context)
        if object_pk:
            # Most-specific template wins: per-model, per-app, then global.
            template_search_list = [
                "comments/%s/%s/list.html" % (ctype.app_label, ctype.model),
                "comments/%s/list.html" % ctype.app_label,
                "comments/list.html"
            ]
            qs = self.get_query_set(context)
            # Push/pop so 'comment_list' doesn't leak into the surrounding
            # template context.
            context.push()
            liststr = render_to_string(template_search_list, {
                "comment_list" : self.get_context_value_from_queryset(context, qs)
            }, context)
            context.pop()
            return liststr
        else:
            return ''
# We could just register each classmethod directly, but then we'd lose out on
# the automagic docstrings-into-admin-docs tricks. So each node gets a cute
# wrapper function that just exists to hold the docstring.
#@register.tag
def get_comment_count(parser, token):
    """
    Gets the comment count for the given params and populates the template
    context with a variable containing that value, whose name is defined by the
    'as' clause.

    Syntax::

        {% get_comment_count for [object] as [varname]  %}
        {% get_comment_count for [app].[model] [object_id] as [varname]  %}

    Example usage::

        {% get_comment_count for event as comment_count %}
        {% get_comment_count for calendar.event event.id as comment_count %}
        {% get_comment_count for calendar.event 17 as comment_count %}
    """
    # Parsing lives on the node class; this wrapper exists to carry the
    # docstring into the automatic admin documentation.
    return CommentCountNode.handle_token(parser, token)
#@register.tag
def get_comment_list(parser, token):
    """
    Gets the list of comments for the given params and populates the template
    context with a variable containing that value, whose name is defined by the
    'as' clause.

    Syntax::

        {% get_comment_list for [object] as [varname]  %}
        {% get_comment_list for [app].[model] [object_id] as [varname]  %}

    Example usage::

        {% get_comment_list for event as comment_list %}
        {% for comment in comment_list %}
            ...
        {% endfor %}
    """
    # Parsing lives on the node class; this wrapper exists to carry the
    # docstring into the automatic admin documentation.
    return CommentListNode.handle_token(parser, token)
#@register.tag
def render_comment_list(parser, token):
    """
    Render the comment list (as returned by ``{% get_comment_list %}``)
    through the ``comments/list.html`` template

    Syntax::

        {% render_comment_list for [object] %}
        {% render_comment_list for [app].[model] [object_id] %}

    Example usage::

        {% render_comment_list for event %}
    """
    # Parsing lives on the node class; this wrapper exists to carry the
    # docstring into the automatic admin documentation.
    return RenderCommentListNode.handle_token(parser, token)
#@register.tag
def get_comment_form(parser, token):
    """
    Populate a template-context variable, named by the ``as`` clause, with a
    (new) form object for posting a comment on the given object.

    Syntax::

        {% get_comment_form for [object] as [varname] %}
        {% get_comment_form for [app].[model] [object_id] as [varname] %}
    """
    return CommentFormNode.handle_token(parser, token)
#@register.tag
def render_comment_form(parser, token):
    """
    Render a comment form for the given object through the
    ``comments/form.html`` template.

    Syntax::

        {% render_comment_form for [object] %}
        {% render_comment_form for [app].[model] [object_id] %}
    """
    return RenderCommentFormNode.handle_token(parser, token)
#@register.simple_tag
def comment_form_target():
    """
    Output the URL that comment forms should be submitted to.

    Example::

        <form action="{% comment_form_target %}" method="post">
    """
    return comments.get_form_target()
#@register.simple_tag
def get_comment_permalink(comment, anchor_pattern=None):
    """
    Output the permalink for a comment.

    An optional anchor pattern controls the format of the named anchor
    appended to the end of the URL; when it is omitted (or empty/falsy) the
    comment's default anchor is used.

    Example::

        {{ get_comment_permalink comment "#c%(id)s-by-%(user_name)s" }}
    """
    if not anchor_pattern:
        return comment.get_absolute_url()
    return comment.get_absolute_url(anchor_pattern)
# Explicit registration of the template tags. The #@register.tag decorators
# above are deliberately commented out (see the note near the top of the tag
# section): registering each plain function keeps its docstring intact for
# the automatic admin documentation. Every tag defined in this module must
# appear here or it will be invisible to templates.
register.tag(get_comment_count)
register.tag(get_comment_list)
register.tag(get_comment_form)
register.tag(render_comment_form)
register.simple_tag(comment_form_target)
register.simple_tag(get_comment_permalink)
register.tag(render_comment_list)
|
ValvePython/steam | refs/heads/master | steam/protobufs/content_manifest_pb2.py | 1 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: content_manifest.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='content_manifest.proto',
package='',
syntax='proto2',
serialized_options=_b('H\001\220\001\000'),
serialized_pb=_b('\n\x16\x63ontent_manifest.proto\"\xef\x02\n\x16\x43ontentManifestPayload\x12\x35\n\x08mappings\x18\x01 \x03(\x0b\x32#.ContentManifestPayload.FileMapping\x1a\x9d\x02\n\x0b\x46ileMapping\x12\x10\n\x08\x66ilename\x18\x01 \x01(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\r\n\x05\x66lags\x18\x03 \x01(\r\x12\x14\n\x0csha_filename\x18\x04 \x01(\x0c\x12\x13\n\x0bsha_content\x18\x05 \x01(\x0c\x12=\n\x06\x63hunks\x18\x06 \x03(\x0b\x32-.ContentManifestPayload.FileMapping.ChunkData\x12\x12\n\nlinktarget\x18\x07 \x01(\t\x1a\x61\n\tChunkData\x12\x0b\n\x03sha\x18\x01 \x01(\x0c\x12\x0b\n\x03\x63rc\x18\x02 \x01(\x07\x12\x0e\n\x06offset\x18\x03 \x01(\x04\x12\x13\n\x0b\x63\x62_original\x18\x04 \x01(\r\x12\x15\n\rcb_compressed\x18\x05 \x01(\r\"\xec\x01\n\x17\x43ontentManifestMetadata\x12\x10\n\x08\x64\x65pot_id\x18\x01 \x01(\r\x12\x14\n\x0cgid_manifest\x18\x02 \x01(\x04\x12\x15\n\rcreation_time\x18\x03 \x01(\r\x12\x1b\n\x13\x66ilenames_encrypted\x18\x04 \x01(\x08\x12\x18\n\x10\x63\x62_disk_original\x18\x05 \x01(\x04\x12\x1a\n\x12\x63\x62_disk_compressed\x18\x06 \x01(\x04\x12\x15\n\runique_chunks\x18\x07 \x01(\r\x12\x15\n\rcrc_encrypted\x18\x08 \x01(\r\x12\x11\n\tcrc_clear\x18\t \x01(\r\"-\n\x18\x43ontentManifestSignature\x12\x11\n\tsignature\x18\x01 \x01(\x0c\"\x84\x03\n\x12\x43ontentDeltaChunks\x12\x10\n\x08\x64\x65pot_id\x18\x01 \x01(\r\x12\x1a\n\x12manifest_id_source\x18\x02 \x01(\x04\x12\x1a\n\x12manifest_id_target\x18\x03 \x01(\x04\x12\x33\n\x0b\x64\x65ltaChunks\x18\x04 \x03(\x0b\x32\x1e.ContentDeltaChunks.DeltaChunk\x12h\n\x13\x63hunk_data_location\x18\x05 \x01(\x0e\x32\x1f.EContentDeltaChunkDataLocation:*k_EContentDeltaChunkDataLocationInProtobuf\x1a\x84\x01\n\nDeltaChunk\x12\x12\n\nsha_source\x18\x01 \x01(\x0c\x12\x12\n\nsha_target\x18\x02 \x01(\x0c\x12\x15\n\rsize_original\x18\x03 \x01(\r\x12\x14\n\x0cpatch_method\x18\x04 \x01(\r\x12\r\n\x05\x63hunk\x18\x05 \x01(\x0c\x12\x12\n\nsize_delta\x18\x06 
\x01(\r*\x83\x01\n\x1e\x45\x43ontentDeltaChunkDataLocation\x12.\n*k_EContentDeltaChunkDataLocationInProtobuf\x10\x00\x12\x31\n-k_EContentDeltaChunkDataLocationAfterProtobuf\x10\x01\x42\x05H\x01\x90\x01\x00')
)
_ECONTENTDELTACHUNKDATALOCATION = _descriptor.EnumDescriptor(
name='EContentDeltaChunkDataLocation',
full_name='EContentDeltaChunkDataLocation',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='k_EContentDeltaChunkDataLocationInProtobuf', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='k_EContentDeltaChunkDataLocationAfterProtobuf', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1074,
serialized_end=1205,
)
_sym_db.RegisterEnumDescriptor(_ECONTENTDELTACHUNKDATALOCATION)
EContentDeltaChunkDataLocation = enum_type_wrapper.EnumTypeWrapper(_ECONTENTDELTACHUNKDATALOCATION)
k_EContentDeltaChunkDataLocationInProtobuf = 0
k_EContentDeltaChunkDataLocationAfterProtobuf = 1
_CONTENTMANIFESTPAYLOAD_FILEMAPPING_CHUNKDATA = _descriptor.Descriptor(
name='ChunkData',
full_name='ContentManifestPayload.FileMapping.ChunkData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sha', full_name='ContentManifestPayload.FileMapping.ChunkData.sha', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crc', full_name='ContentManifestPayload.FileMapping.ChunkData.crc', index=1,
number=2, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offset', full_name='ContentManifestPayload.FileMapping.ChunkData.offset', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cb_original', full_name='ContentManifestPayload.FileMapping.ChunkData.cb_original', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cb_compressed', full_name='ContentManifestPayload.FileMapping.ChunkData.cb_compressed', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=297,
serialized_end=394,
)
_CONTENTMANIFESTPAYLOAD_FILEMAPPING = _descriptor.Descriptor(
name='FileMapping',
full_name='ContentManifestPayload.FileMapping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='filename', full_name='ContentManifestPayload.FileMapping.filename', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size', full_name='ContentManifestPayload.FileMapping.size', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flags', full_name='ContentManifestPayload.FileMapping.flags', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sha_filename', full_name='ContentManifestPayload.FileMapping.sha_filename', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sha_content', full_name='ContentManifestPayload.FileMapping.sha_content', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chunks', full_name='ContentManifestPayload.FileMapping.chunks', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='linktarget', full_name='ContentManifestPayload.FileMapping.linktarget', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CONTENTMANIFESTPAYLOAD_FILEMAPPING_CHUNKDATA, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=394,
)
_CONTENTMANIFESTPAYLOAD = _descriptor.Descriptor(
name='ContentManifestPayload',
full_name='ContentManifestPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mappings', full_name='ContentManifestPayload.mappings', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CONTENTMANIFESTPAYLOAD_FILEMAPPING, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=27,
serialized_end=394,
)
_CONTENTMANIFESTMETADATA = _descriptor.Descriptor(
name='ContentManifestMetadata',
full_name='ContentManifestMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='depot_id', full_name='ContentManifestMetadata.depot_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gid_manifest', full_name='ContentManifestMetadata.gid_manifest', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creation_time', full_name='ContentManifestMetadata.creation_time', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filenames_encrypted', full_name='ContentManifestMetadata.filenames_encrypted', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cb_disk_original', full_name='ContentManifestMetadata.cb_disk_original', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cb_disk_compressed', full_name='ContentManifestMetadata.cb_disk_compressed', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unique_chunks', full_name='ContentManifestMetadata.unique_chunks', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crc_encrypted', full_name='ContentManifestMetadata.crc_encrypted', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crc_clear', full_name='ContentManifestMetadata.crc_clear', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=397,
serialized_end=633,
)
_CONTENTMANIFESTSIGNATURE = _descriptor.Descriptor(
name='ContentManifestSignature',
full_name='ContentManifestSignature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='signature', full_name='ContentManifestSignature.signature', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=635,
serialized_end=680,
)
_CONTENTDELTACHUNKS_DELTACHUNK = _descriptor.Descriptor(
name='DeltaChunk',
full_name='ContentDeltaChunks.DeltaChunk',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sha_source', full_name='ContentDeltaChunks.DeltaChunk.sha_source', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sha_target', full_name='ContentDeltaChunks.DeltaChunk.sha_target', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size_original', full_name='ContentDeltaChunks.DeltaChunk.size_original', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patch_method', full_name='ContentDeltaChunks.DeltaChunk.patch_method', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chunk', full_name='ContentDeltaChunks.DeltaChunk.chunk', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size_delta', full_name='ContentDeltaChunks.DeltaChunk.size_delta', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=939,
serialized_end=1071,
)
_CONTENTDELTACHUNKS = _descriptor.Descriptor(
name='ContentDeltaChunks',
full_name='ContentDeltaChunks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='depot_id', full_name='ContentDeltaChunks.depot_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='manifest_id_source', full_name='ContentDeltaChunks.manifest_id_source', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='manifest_id_target', full_name='ContentDeltaChunks.manifest_id_target', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deltaChunks', full_name='ContentDeltaChunks.deltaChunks', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chunk_data_location', full_name='ContentDeltaChunks.chunk_data_location', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CONTENTDELTACHUNKS_DELTACHUNK, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=683,
serialized_end=1071,
)
_CONTENTMANIFESTPAYLOAD_FILEMAPPING_CHUNKDATA.containing_type = _CONTENTMANIFESTPAYLOAD_FILEMAPPING
_CONTENTMANIFESTPAYLOAD_FILEMAPPING.fields_by_name['chunks'].message_type = _CONTENTMANIFESTPAYLOAD_FILEMAPPING_CHUNKDATA
_CONTENTMANIFESTPAYLOAD_FILEMAPPING.containing_type = _CONTENTMANIFESTPAYLOAD
_CONTENTMANIFESTPAYLOAD.fields_by_name['mappings'].message_type = _CONTENTMANIFESTPAYLOAD_FILEMAPPING
_CONTENTDELTACHUNKS_DELTACHUNK.containing_type = _CONTENTDELTACHUNKS
_CONTENTDELTACHUNKS.fields_by_name['deltaChunks'].message_type = _CONTENTDELTACHUNKS_DELTACHUNK
_CONTENTDELTACHUNKS.fields_by_name['chunk_data_location'].enum_type = _ECONTENTDELTACHUNKDATALOCATION
DESCRIPTOR.message_types_by_name['ContentManifestPayload'] = _CONTENTMANIFESTPAYLOAD
DESCRIPTOR.message_types_by_name['ContentManifestMetadata'] = _CONTENTMANIFESTMETADATA
DESCRIPTOR.message_types_by_name['ContentManifestSignature'] = _CONTENTMANIFESTSIGNATURE
DESCRIPTOR.message_types_by_name['ContentDeltaChunks'] = _CONTENTDELTACHUNKS
DESCRIPTOR.enum_types_by_name['EContentDeltaChunkDataLocation'] = _ECONTENTDELTACHUNKDATALOCATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ContentManifestPayload = _reflection.GeneratedProtocolMessageType('ContentManifestPayload', (_message.Message,), dict(
FileMapping = _reflection.GeneratedProtocolMessageType('FileMapping', (_message.Message,), dict(
ChunkData = _reflection.GeneratedProtocolMessageType('ChunkData', (_message.Message,), dict(
DESCRIPTOR = _CONTENTMANIFESTPAYLOAD_FILEMAPPING_CHUNKDATA,
__module__ = 'content_manifest_pb2'
# @@protoc_insertion_point(class_scope:ContentManifestPayload.FileMapping.ChunkData)
))
,
DESCRIPTOR = _CONTENTMANIFESTPAYLOAD_FILEMAPPING,
__module__ = 'content_manifest_pb2'
# @@protoc_insertion_point(class_scope:ContentManifestPayload.FileMapping)
))
,
DESCRIPTOR = _CONTENTMANIFESTPAYLOAD,
__module__ = 'content_manifest_pb2'
# @@protoc_insertion_point(class_scope:ContentManifestPayload)
))
_sym_db.RegisterMessage(ContentManifestPayload)
_sym_db.RegisterMessage(ContentManifestPayload.FileMapping)
_sym_db.RegisterMessage(ContentManifestPayload.FileMapping.ChunkData)
ContentManifestMetadata = _reflection.GeneratedProtocolMessageType('ContentManifestMetadata', (_message.Message,), dict(
DESCRIPTOR = _CONTENTMANIFESTMETADATA,
__module__ = 'content_manifest_pb2'
# @@protoc_insertion_point(class_scope:ContentManifestMetadata)
))
_sym_db.RegisterMessage(ContentManifestMetadata)
ContentManifestSignature = _reflection.GeneratedProtocolMessageType('ContentManifestSignature', (_message.Message,), dict(
DESCRIPTOR = _CONTENTMANIFESTSIGNATURE,
__module__ = 'content_manifest_pb2'
# @@protoc_insertion_point(class_scope:ContentManifestSignature)
))
_sym_db.RegisterMessage(ContentManifestSignature)
ContentDeltaChunks = _reflection.GeneratedProtocolMessageType('ContentDeltaChunks', (_message.Message,), dict(
DeltaChunk = _reflection.GeneratedProtocolMessageType('DeltaChunk', (_message.Message,), dict(
DESCRIPTOR = _CONTENTDELTACHUNKS_DELTACHUNK,
__module__ = 'content_manifest_pb2'
# @@protoc_insertion_point(class_scope:ContentDeltaChunks.DeltaChunk)
))
,
DESCRIPTOR = _CONTENTDELTACHUNKS,
__module__ = 'content_manifest_pb2'
# @@protoc_insertion_point(class_scope:ContentDeltaChunks)
))
_sym_db.RegisterMessage(ContentDeltaChunks)
_sym_db.RegisterMessage(ContentDeltaChunks.DeltaChunk)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
d9w/6858-android-intents | refs/heads/master | analyzer/androguard/core/analysis/sign.py | 7 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from androguard.core.analysis.analysis import TAINTED_PACKAGE_CREATE, TAINTED_PACKAGE_CALL
from androguard.core.bytecodes import dvm
# Extra access kind for calls whose destination class belongs to the analysed
# DEX itself (complements the imported TAINTED_PACKAGE_CREATE /
# TAINTED_PACKAGE_CALL flags). Value chosen not to clash with those two —
# NOTE(review): assumes the imported flags are 0 and 1; verify in analysis.py.
TAINTED_PACKAGE_INTERNAL_CALL = 2
# Field access kinds used in signature strings: "R"ead -> 0, "W"rite -> 1.
FIELD_ACCESS = { "R" : 0, "W" : 1 }
# Maps each package-access flag to the single digit embedded after "P" in
# signature strings (see Signature._get_packages_* below).
PACKAGE_ACCESS = { TAINTED_PACKAGE_CREATE : 0, TAINTED_PACKAGE_CALL : 1, TAINTED_PACKAGE_INTERNAL_CALL : 2 }
class Sign :
    """Accumulates per-level signature values, remembering insertion order."""

    def __init__(self) :
        # level name -> value, plus the order in which levels were added
        self.levels = {}
        self.hlevels = []

    def add(self, level, value) :
        """Store ``value`` under ``level`` and record the insertion order."""
        self.hlevels.append( level )
        self.levels[ level ] = value

    def get_level(self, l) :
        """Return the value stored for numeric level ``l`` (key ``"L<l>"``)."""
        return self.levels[ "L%d" % l ]

    def get_string(self) :
        """Concatenate every stored value in the order the levels were added."""
        return "".join( self.levels[ name ] for name in self.hlevels )

    def get_list(self) :
        """Return the value recorded for the special ``sequencebb`` level."""
        return self.levels[ "sequencebb" ]
class Signature :
def __init__(self, vmx) :
self.vmx = vmx
self.tainted_packages = self.vmx.get_tainted_packages()
self.tainted_variables = self.vmx.get_tainted_variables()
self._cached_signatures = {}
self._cached_fields = {}
self._cached_packages = {}
self._global_cached = {}
self.levels = {
# Classical method signature with basic blocks, strings, fields, packages
"L0" : {
0 : ( "_get_strings_a", "_get_fields_a", "_get_packages_a" ),
1 : ( "_get_strings_pa", "_get_fields_a", "_get_packages_a" ),
2 : ( "_get_strings_a", "_get_fields_a", "_get_packages_pa_1" ),
3 : ( "_get_strings_a", "_get_fields_a", "_get_packages_pa_2" ),
},
# strings
"L1" : [ "_get_strings_a1" ],
# exceptions
"L2" : [ "_get_exceptions" ],
# fill array data
"L3" : [ "_get_fill_array_data" ],
}
self.classes_names = None
self._init_caches()
def _get_method_info(self, m) :
m1 = m.get_method()
return "%s-%s-%s" % (m1.get_class_name(), m1.get_name(), m1.get_descriptor())
def _get_sequence_bb(self, analysis_method) :
l = []
for i in analysis_method.basic_blocks.get() :
buff = ""
instructions = [j for j in i.get_instructions()]
if len(instructions) > 5 :
for ins in instructions :
buff += ins.get_name()
if buff != "" :
l.append( buff )
return l
def _get_hex(self, analysis_method) :
code = analysis_method.get_method().get_code()
if code == None :
return ""
buff = ""
for i in code.get_bc().get_instructions() :
buff += dvm.clean_name_instruction( i )
buff += dvm.static_operand_instruction( i )
return buff
def _get_bb(self, analysis_method, functions, options) :
bbs = []
for b in analysis_method.basic_blocks.get() :
l = []
l.append( (b.start, "B") )
l.append( (b.start, "[") )
internal = []
op_value = b.get_last().get_op_value()
# return
if op_value >= 0x0e and op_value <= 0x11 :
internal.append( (b.end-1, "R") )
# if
elif op_value >= 0x32 and op_value <= 0x3d :
internal.append( (b.end-1, "I") )
# goto
elif op_value >= 0x28 and op_value <= 0x2a :
internal.append( (b.end-1, "G") )
# sparse or packed switch
elif op_value >= 0x2b and op_value <= 0x2c :
internal.append( (b.end-1, "G") )
for f in functions :
try :
internal.extend( getattr( self, f )( analysis_method, options ) )
except TypeError :
internal.extend( getattr( self, f )( analysis_method ) )
internal.sort()
for i in internal :
if i[0] >= b.start and i[0] < b.end :
l.append( i )
del internal
l.append( (b.end, "]") )
bbs.append( ''.join(i[1] for i in l) )
return bbs
def _init_caches(self) :
if self._cached_fields == {} :
for f_t, f in self.tainted_variables.get_fields() :
self._cached_fields[ f ] = f_t.get_paths_length()
n = 0
for f in sorted( self._cached_fields ) :
self._cached_fields[ f ] = n
n += 1
if self._cached_packages == {} :
for m_t, m in self.tainted_packages.get_packages() :
self._cached_packages[ m ] = m_t.get_paths_length()
n = 0
for m in sorted( self._cached_packages ) :
self._cached_packages[ m ] = n
n += 1
def _get_fill_array_data(self, analysis_method) :
buff = ""
for b in analysis_method.basic_blocks.get() :
for i in b.get_instructions() :
if i.get_name() == "FILL-ARRAY-DATA" :
buff_tmp = i.get_operands()
for j in range(0, len(buff_tmp)) :
buff += "\\x%02x" % ord( buff_tmp[j] )
return buff
def _get_exceptions(self, analysis_method) :
buff = ""
method = analysis_method.get_method()
code = method.get_code()
if code == None or code.get_tries_size() <= 0 :
return buff
handler_catch_list = code.get_handlers()
for handler_catch in handler_catch_list.get_list() :
for handler in handler_catch.get_handlers() :
buff += analysis_method.get_vm().get_cm_type( handler.get_type_idx() )
return buff
def _get_strings_a1(self, analysis_method) :
buff = ""
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
buff += s.replace('\n', ' ')
return buff
def _get_strings_pa(self, analysis_method) :
l = []
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
l.append( ( path[1], "S%d" % len(s) ) )
return l
def _get_strings_a(self, analysis_method) :
key = "SA-%s" % self._get_method_info(analysis_method)
if key in self._global_cached :
return self._global_cached[ key ]
l = []
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
l.append( ( path[1], "S") )
self._global_cached[ key ] = l
return l
def _get_fields_a(self, analysis_method) :
key = "FA-%s" % self._get_method_info(analysis_method)
if key in self._global_cached :
return self._global_cached[ key ]
fields_method = self.tainted_variables.get_fields_by_method( analysis_method.get_method() )
l = []
for f in fields_method :
for path in fields_method[ f ] :
l.append( (path[1], "F%d" % FIELD_ACCESS[ path[0] ]) )
self._global_cached[ key ] = l
return l
def _get_packages_a(self, analysis_method) :
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
l = []
for m in packages_method :
for path in packages_method[ m ] :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
return l
def _get_packages(self, analysis_method, include_packages) :
l = self._get_packages_pa_1( analysis_method, include_packages )
return "".join([ i[1] for i in l ])
def _get_packages_pa_1(self, analysis_method, include_packages) :
key = "PA1-%s-%s" % (self._get_method_info(analysis_method), include_packages)
if key in self._global_cached :
return self._global_cached[ key ]
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
if self.classes_names == None :
self.classes_names = analysis_method.get_vm().get_classes_names()
l = []
for m in packages_method :
for path in packages_method[ m ] :
present = False
for i in include_packages :
if m.find(i) == 0 :
present = True
break
if path.get_access_flag() == 1 :
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( analysis_method.get_vm().get_class_manager() )
if dst_class_name in self.classes_names :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ 2 ]) ) )
else :
if present == True :
l.append( (path.get_idx(), "P%s{%s%s%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], dst_class_name, dst_method_name, dst_descriptor ) ) )
else :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
else :
if present == True :
l.append( (path.get_idx(), "P%s{%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], m) ) )
else :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
self._global_cached[ key ] = l
return l
def _get_packages_pa_2(self, analysis_method, include_packages) :
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
l = []
for m in packages_method :
for path in packages_method[ m ] :
present = False
for i in include_packages :
if m.find(i) == 0 :
present = True
break
if present == True :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
continue
if path.get_access_flag() == 1 :
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( analysis_method.get_vm().get_class_manager() )
l.append( (path.get_idx(), "P%s{%s%s%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], dst_class_name, dst_method_name, dst_descriptor ) ) )
else :
l.append( (path.get_idx(), "P%s{%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], m) ) )
return l
    def get_method(self, analysis_method, signature_type, signature_arguments={}) :
        """
        Return the ``Sign`` signature object for ``analysis_method``.

        ``signature_type`` is a colon-separated list of level names
        (e.g. ``"L0:L4"``); ``signature_arguments`` maps a level name to its
        options (keys ``"type"`` and optional ``"arguments"``).  Results are
        cached in ``self._cached_signatures`` keyed on
        (method info, type, arguments).

        NOTE(review): the mutable default ``signature_arguments={}`` is
        benign here because the dict is only read, never mutated; changing
        it to ``None`` would also change the cache-key string.
        """
        key = "%s-%s-%s" % (self._get_method_info(analysis_method), signature_type, signature_arguments)
        if key in self._cached_signatures :
            return self._cached_signatures[ key ]
        s = Sign()
        for i in signature_type.split(":") :
            if i == "L0" :
                # Basic-block level: the grammar is selected via "type";
                # "arguments" is an optional filter list.
                _type = self.levels[ i ][ signature_arguments[ i ][ "type" ] ]
                try :
                    _arguments = signature_arguments[ i ][ "arguments" ]
                except KeyError :
                    _arguments = []
                value = self._get_bb( analysis_method, _type, _arguments )
                s.add( i, ''.join(z for z in value) )
            elif i == "L4" :
                # Package level: only the optional "arguments" filter applies.
                try :
                    _arguments = signature_arguments[ i ][ "arguments" ]
                except KeyError :
                    _arguments = []
                value = self._get_packages( analysis_method, _arguments )
                s.add( i , value )
            elif i == "hex" :
                value = self._get_hex( analysis_method )
                s.add( i, value )
            elif i == "sequencebb" :
                # NOTE(review): _type/_arguments are assigned but unused in
                # this branch; _get_sequence_bb takes only the method.
                _type = ('_get_strings_a', '_get_fields_a', '_get_packages_pa_1')
                _arguments = ['Landroid', 'Ljava']
                value = self._get_sequence_bb( analysis_method )
                s.add( i, value )
            else :
                # Remaining levels dispatch to the callables registered in
                # self.levels, looked up by attribute name.
                for f in self.levels[ i ] :
                    value = getattr( self, f )( analysis_method )
                    s.add( i, value )
        self._cached_signatures[ key ] = s
        return s
|
LordDamionDevil/Lony | refs/heads/master | lib/youtube_dl/update.py | 66 | from __future__ import unicode_literals
import io
import json
import traceback
import hashlib
import os
import subprocess
import sys
from zipimport import zipimporter
from .utils import encode_compat_str
from .version import __version__
def rsa_verify(message, signature, key):
    """
    Verify an RSA SHA-256 signature over ``message``.

    ``signature`` is the hex-encoded raw RSA signature and ``key`` is a
    ``(modulus, exponent)`` pair.  The signature is "decrypted" with the
    public key and compared against the expected EMSA-PKCS1-v1_5 encoding
    of the SHA-256 digest of ``message``.  Returns True on an exact match.
    """
    from hashlib import sha256
    assert isinstance(message, bytes)
    modulus, exponent = key
    byte_size = (len(bin(modulus)) - 2 + 7) // 8
    # Undo the RSA operation, then left-pad the hex back to full key width.
    decrypted = ('%x' % pow(int(signature, 16), exponent, modulus)).encode()
    decrypted = decrypted.rjust(byte_size * 2, b'0')
    # DigestInfo: fixed ASN.1 header for SHA-256 followed by the hex digest.
    digest_info = b'3031300d060960864801650304020105000420'
    digest_info += sha256(message).hexdigest().encode()
    if byte_size < len(digest_info) // 2 + 11:
        # Key too small to hold the padded digest: cannot possibly be valid.
        return False
    padding = (byte_size - len(digest_info) // 2 - 3) * b'ff'
    expected = b'0001' + padding + b'00' + digest_info
    return expected == decrypted
def update_self(to_screen, verbose, opener):
    """Update the program file with the latest version from the repository

    ``to_screen`` is a callable used for user-facing messages, ``verbose``
    enables traceback output on errors, and ``opener`` is the URL opener
    used for all downloads.  Only the py2exe binary and the zipped unix
    package can self-update; other installs are told to use their package
    manager instead.
    """
    UPDATE_URL = 'https://rg3.github.io/youtube-dl/update/'
    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
    JSON_URL = UPDATE_URL + 'versions.json'
    # Public RSA key (modulus, exponent) used to verify versions.json.
    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
    # Not a self-updatable install (pip / package manager / tarball).
    if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, 'frozen'):
        to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
        return
    # Check if there is a new version
    try:
        newversion = opener.open(VERSION_URL).read().decode('utf-8').strip()
    except Exception:
        if verbose:
            to_screen(encode_compat_str(traceback.format_exc()))
        to_screen('ERROR: can\'t find the current version. Please try again later.')
        return
    if newversion == __version__:
        to_screen('youtube-dl is up-to-date (' + __version__ + ')')
        return
    # Download and check versions info
    try:
        versions_info = opener.open(JSON_URL).read().decode('utf-8')
        versions_info = json.loads(versions_info)
    except Exception:
        if verbose:
            to_screen(encode_compat_str(traceback.format_exc()))
        to_screen('ERROR: can\'t obtain versions info. Please try again later.')
        return
    if 'signature' not in versions_info:
        to_screen('ERROR: the versions file is not signed or corrupted. Aborting.')
        return
    signature = versions_info['signature']
    del versions_info['signature']
    # The signature covers the canonical (sorted-keys) JSON serialization.
    if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
        to_screen('ERROR: the versions file signature is invalid. Aborting.')
        return
    version_id = versions_info['latest']
    def version_tuple(version_str):
        # "2015.01.02" -> (2015, 1, 2) for numeric comparison.
        return tuple(map(int, version_str.split('.')))
    if version_tuple(__version__) >= version_tuple(version_id):
        to_screen('youtube-dl is up to date (%s)' % __version__)
        return
    to_screen('Updating to version ' + version_id + ' ...')
    version = versions_info['versions'][version_id]
    print_notes(to_screen, versions_info['versions'])
    # sys.executable is set to the full pathname of the exe-file for py2exe
    filename = sys.executable if hasattr(sys, 'frozen') else sys.argv[0]
    if not os.access(filename, os.W_OK):
        to_screen('ERROR: no write permissions on %s' % filename)
        return
    # Py2EXE
    if hasattr(sys, 'frozen'):
        exe = filename
        directory = os.path.dirname(exe)
        if not os.access(directory, os.W_OK):
            to_screen('ERROR: no write permissions on %s' % directory)
            return
        try:
            urlh = opener.open(version['exe'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to download latest version')
            return
        # Verify the download against the published SHA-256 hash.
        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['exe'][1]:
            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
            return
        try:
            with open(exe + '.new', 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to write the new version')
            return
        # A running exe cannot replace itself on Windows, so hand off to a
        # batch file that waits for the handle, swaps the files and deletes
        # itself.
        try:
            bat = os.path.join(directory, 'youtube-dl-updater.bat')
            with io.open(bat, 'w') as batfile:
                batfile.write('''
@echo off
echo Waiting for file handle to be closed ...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s" > NUL
echo Updated youtube-dl to version %s.
start /b "" cmd /c del "%%~f0"&exit /b"
\n''' % (exe, exe, version_id))
            subprocess.Popen([bat])  # Continues to run in the background
            return  # Do not show premature success messages
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to overwrite current version')
            return
    # Zip unix package
    elif isinstance(globals().get('__loader__'), zipimporter):
        try:
            urlh = opener.open(version['bin'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to download latest version')
            return
        # Verify the download against the published SHA-256 hash.
        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['bin'][1]:
            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
            return
        try:
            with open(filename, 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to overwrite current version')
            return
    to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
def get_notes(versions, fromVersion):
    """Collect the release notes of every version newer than *fromVersion*.

    ``versions`` maps version-id strings to version dicts; notes are
    gathered in ascending version order (string comparison, which matches
    the date-based version scheme).
    """
    collected = []
    for version_id, version_data in sorted(versions.items()):
        if version_id <= fromVersion:
            continue
        collected.extend(version_data.get('notes', []))
    return collected
def print_notes(to_screen, versions, fromVersion=__version__):
    """Print release notes newer than *fromVersion* via ``to_screen``.

    Prints nothing at all when there are no pending notes.
    """
    pending = get_notes(versions, fromVersion)
    if not pending:
        return
    to_screen('PLEASE NOTE:')
    for line in pending:
        to_screen(line)
|
abhiii5459/sympy | refs/heads/master | sympy/polys/agca/homomorphisms.py | 63 | """
Computations with homomorphisms of modules and rings.
This module implements classes for representing homomorphisms of rings and
their modules. Instead of instantiating the classes directly, you should use
the function ``homomorphism(from, to, matrix)`` to create homomorphism objects.
"""
from __future__ import print_function, division
from sympy.polys.agca.modules import (Module, FreeModule, QuotientModule,
SubModule, SubQuotientModule)
from sympy.polys.polyerrors import CoercionFailed
from sympy.core.compatibility import range
# The main computational task for module homomorphisms is kernels.
# For this reason, the concrete classes are organised by domain module type.
class ModuleHomomorphism(object):
"""
Abstract base class for module homomoprhisms. Do not instantiate.
Instead, use the ``homomorphism`` function:
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> homomorphism(F, F, [[1, 0], [0, 1]])
Matrix([
[1, 0], : QQ[x]**2 -> QQ[x]**2
[0, 1]])
Attributes:
- ring - the ring over which we are considering modules
- domain - the domain module
- codomain - the codomain module
- _ker - cached kernel
- _img - cached image
Non-implemented methods:
- _kernel
- _image
- _restrict_domain
- _restrict_codomain
- _quotient_domain
- _quotient_codomain
- _apply
- _mul_scalar
- _compose
- _add
"""
    def __init__(self, domain, codomain):
        # Validate that both endpoints are modules over the same ring; the
        # ring of the homomorphism is inherited from the domain.
        if not isinstance(domain, Module):
            raise TypeError('Source must be a module, got %s' % domain)
        if not isinstance(codomain, Module):
            raise TypeError('Target must be a module, got %s' % codomain)
        if domain.ring != codomain.ring:
            raise ValueError('Source and codomain must be over same ring, '
                            'got %s != %s' % (domain, codomain))
        self.domain = domain
        self.codomain = codomain
        self.ring = domain.ring
        # Lazy caches for the (potentially expensive) kernel and image.
        self._ker = None
        self._img = None
def kernel(self):
r"""
Compute the kernel of ``self``.
That is, if ``self`` is the homomorphism `\phi: M \to N`, then compute
`ker(\phi) = \{x \in M | \phi(x) = 0\}`. This is a submodule of `M`.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> homomorphism(F, F, [[1, 0], [x, 0]]).kernel()
<[x, -1]>
"""
if self._ker is None:
self._ker = self._kernel()
return self._ker
def image(self):
r"""
Compute the image of ``self``.
That is, if ``self`` is the homomorphism `\phi: M \to N`, then compute
`im(\phi) = \{\phi(x) | x \in M \}`. This is a submodule of `N`.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> homomorphism(F, F, [[1, 0], [x, 0]]).image() == F.submodule([1, 0])
True
"""
if self._img is None:
self._img = self._image()
return self._img
def _kernel(self):
"""Compute the kernel of ``self``."""
raise NotImplementedError
def _image(self):
"""Compute the image of ``self``."""
raise NotImplementedError
def _restrict_domain(self, sm):
"""Implementation of domain restriction."""
raise NotImplementedError
def _restrict_codomain(self, sm):
"""Implementation of codomain restriction."""
raise NotImplementedError
def _quotient_domain(self, sm):
"""Implementation of domain quotient."""
raise NotImplementedError
def _quotient_codomain(self, sm):
"""Implementation of codomain quotient."""
raise NotImplementedError
def restrict_domain(self, sm):
"""
Return ``self``, with the domain restricted to ``sm``.
Here ``sm`` has to be a submodule of ``self.domain``.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2
[0, 0]])
>>> h.restrict_domain(F.submodule([1, 0]))
Matrix([
[1, x], : <[1, 0]> -> QQ[x]**2
[0, 0]])
This is the same as just composing on the right with the submodule
inclusion:
>>> h * F.submodule([1, 0]).inclusion_hom()
Matrix([
[1, x], : <[1, 0]> -> QQ[x]**2
[0, 0]])
"""
if not self.domain.is_submodule(sm):
raise ValueError('sm must be a submodule of %s, got %s'
% (self.domain, sm))
if sm == self.domain:
return self
return self._restrict_domain(sm)
def restrict_codomain(self, sm):
"""
Return ``self``, with codomain restricted to to ``sm``.
Here ``sm`` has to be a submodule of ``self.codomain`` containing the
image.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2
[0, 0]])
>>> h.restrict_codomain(F.submodule([1, 0]))
Matrix([
[1, x], : QQ[x]**2 -> <[1, 0]>
[0, 0]])
"""
if not sm.is_submodule(self.image()):
raise ValueError('the image %s must contain sm, got %s'
% (self.image(), sm))
if sm == self.codomain:
return self
return self._restrict_codomain(sm)
def quotient_domain(self, sm):
"""
Return ``self`` with domain replaced by ``domain/sm``.
Here ``sm`` must be a submodule of ``self.kernel()``.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2
[0, 0]])
>>> h.quotient_domain(F.submodule([-x, 1]))
Matrix([
[1, x], : QQ[x]**2/<[-x, 1]> -> QQ[x]**2
[0, 0]])
"""
if not self.kernel().is_submodule(sm):
raise ValueError('kernel %s must contain sm, got %s' %
(self.kernel(), sm))
if sm.is_zero():
return self
return self._quotient_domain(sm)
def quotient_codomain(self, sm):
"""
Return ``self`` with codomain replaced by ``codomain/sm``.
Here ``sm`` must be a submodule of ``self.codomain``.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2
[0, 0]])
>>> h.quotient_codomain(F.submodule([1, 1]))
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2/<[1, 1]>
[0, 0]])
This is the same as composing with the quotient map on the left:
>>> (F/[(1, 1)]).quotient_hom() * h
Matrix([
[1, x], : QQ[x]**2 -> QQ[x]**2/<[1, 1]>
[0, 0]])
"""
if not self.codomain.is_submodule(sm):
raise ValueError('sm must be a submodule of codomain %s, got %s'
% (self.codomain, sm))
if sm.is_zero():
return self
return self._quotient_codomain(sm)
def _apply(self, elem):
"""Apply ``self`` to ``elem``."""
raise NotImplementedError
def __call__(self, elem):
return self.codomain.convert(self._apply(self.domain.convert(elem)))
def _compose(self, oth):
"""
Compose ``self`` with ``oth``, that is, return the homomorphism
obtained by first applying then ``self``, then ``oth``.
(This method is private since in this syntax, it is non-obvious which
homomorphism is executed first.)
"""
raise NotImplementedError
def _mul_scalar(self, c):
"""Scalar multiplication. ``c`` is guaranteed in self.ring."""
raise NotImplementedError
def _add(self, oth):
"""
Homomorphism addition.
``oth`` is guaranteed to be a homomorphism with same domain/codomain.
"""
raise NotImplementedError
def _check_hom(self, oth):
"""Helper to check that oth is a homomorphism with same domain/codomain."""
if not isinstance(oth, ModuleHomomorphism):
return False
return oth.domain == self.domain and oth.codomain == self.codomain
def __mul__(self, oth):
if isinstance(oth, ModuleHomomorphism) and self.domain == oth.codomain:
return oth._compose(self)
try:
return self._mul_scalar(self.ring.convert(oth))
except CoercionFailed:
return NotImplemented
# NOTE: _compose will never be called from rmul
__rmul__ = __mul__
def __div__(self, oth):
try:
return self._mul_scalar(1/self.ring.convert(oth))
except CoercionFailed:
return NotImplemented
__truediv__ = __div__
def __add__(self, oth):
if self._check_hom(oth):
return self._add(oth)
return NotImplemented
def __sub__(self, oth):
if self._check_hom(oth):
return self._add(oth._mul_scalar(self.ring.convert(-1)))
return NotImplemented
def is_injective(self):
"""
Return True if ``self`` is injective.
That is, check if the elements of the domain are mapped to the same
codomain element.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h.is_injective()
False
>>> h.quotient_domain(h.kernel()).is_injective()
True
"""
return self.kernel().is_zero()
def is_surjective(self):
"""
Return True if ``self`` is surjective.
That is, check if every element of the codomain has at least one
preimage.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h.is_surjective()
False
>>> h.restrict_codomain(h.image()).is_surjective()
True
"""
return self.image() == self.codomain
def is_isomorphism(self):
"""
Return True if ``self`` is an isomorphism.
That is, check if every element of the codomain has precisely one
preimage. Equivalently, ``self`` is both injective and surjective.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h = h.restrict_codomain(h.image())
>>> h.is_isomorphism()
False
>>> h.quotient_domain(h.kernel()).is_isomorphism()
True
"""
return self.is_injective() and self.is_surjective()
def is_zero(self):
"""
Return True if ``self`` is a zero morphism.
That is, check if every element of the domain is mapped to zero
under self.
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> h = homomorphism(F, F, [[1, 0], [x, 0]])
>>> h.is_zero()
False
>>> h.restrict_domain(F.submodule()).is_zero()
True
>>> h.quotient_codomain(h.image()).is_zero()
True
"""
return self.image().is_zero()
def __eq__(self, oth):
try:
return (self - oth).is_zero()
except TypeError:
return False
def __ne__(self, oth):
return not (self == oth)
class MatrixHomomorphism(ModuleHomomorphism):
    r"""
    Helper class for all homomorphisms which are expressed via a matrix.

    That is, for such homomorphisms ``domain`` is contained in a module
    generated by finitely many elements `e_1, \dots, e_n`, so that the
    homomorphism is determined uniquely by its action on the `e_i`. It
    can thus be represented as a vector of elements of the codomain module,
    or potentially a supermodule of the codomain module
    (and hence conventionally as a matrix, if there is a similar interpretation
    for elements of the codomain module).

    Note that this class does *not* assume that the `e_i` freely generate a
    submodule, nor that ``domain`` is even all of this submodule. It exists
    only to unify the interface.

    Do not instantiate.

    Attributes:

    - matrix - the list of images determining the homomorphism.
    NOTE: the elements of matrix belong to either self.codomain or
          self.codomain.container

    Still non-implemented methods:

    - kernel
    - _apply
    """

    def __init__(self, domain, codomain, matrix):
        ModuleHomomorphism.__init__(self, domain, codomain)
        if len(matrix) != domain.rank:
            raise ValueError('Need to provide %s elements, got %s'
                             % (domain.rank, len(matrix)))

        # Matrix entries may lie outside a (sub)quotient codomain itself,
        # so convert them via the containing module in that case.
        converter = self.codomain.convert
        if isinstance(self.codomain, (SubModule, SubQuotientModule)):
            converter = self.codomain.container.convert
        self.matrix = tuple(converter(x) for x in matrix)

    def _sympy_matrix(self):
        """Helper function which returns a sympy matrix ``self.matrix``."""
        from sympy.matrices import Matrix
        c = lambda x: x
        if isinstance(self.codomain, (QuotientModule, SubQuotientModule)):
            c = lambda x: x.data
        return Matrix([[self.ring.to_sympy(y) for y in c(x)] for x in self.matrix]).T

    def __repr__(self):
        # Render the matrix, then attach " : domain -> codomain" to the
        # middle line, padding the other lines so the block stays aligned.
        lines = repr(self._sympy_matrix()).split('\n')
        t = " : %s -> %s" % (self.domain, self.codomain)
        s = ' '*len(t)
        n = len(lines)
        for i in range(n // 2):
            lines[i] += s
        lines[n // 2] += t
        for i in range(n//2 + 1, n):
            lines[i] += s
        return '\n'.join(lines)

    def _restrict_domain(self, sm):
        """Implementation of domain restriction."""
        return SubModuleHomomorphism(sm, self.codomain, self.matrix)

    def _restrict_codomain(self, sm):
        """Implementation of codomain restriction."""
        return self.__class__(self.domain, sm, self.matrix)

    def _quotient_domain(self, sm):
        """Implementation of domain quotient."""
        return self.__class__(self.domain/sm, self.codomain, self.matrix)

    def _quotient_codomain(self, sm):
        """Implementation of codomain quotient."""
        Q = self.codomain/sm
        # Matrix entries of a SubModule codomain live in its container, so
        # they must be converted through the quotient's container as well.
        converter = Q.convert
        if isinstance(self.codomain, SubModule):
            converter = Q.container.convert
        return self.__class__(self.domain, self.codomain/sm,
                              [converter(x) for x in self.matrix])

    def _add(self, oth):
        """Pointwise addition of the image vectors."""
        return self.__class__(self.domain, self.codomain,
                              [x + y for x, y in zip(self.matrix, oth.matrix)])

    def _mul_scalar(self, c):
        """Scalar multiplication of the image vectors."""
        return self.__class__(self.domain, self.codomain, [c*x for x in self.matrix])

    def _compose(self, oth):
        """Composition: apply ``oth`` to each generator image."""
        return self.__class__(self.domain, oth.codomain, [oth(x) for x in self.matrix])
class FreeModuleHomomorphism(MatrixHomomorphism):
    """
    Concrete class for homomorphisms with domain a free module or a quotient
    thereof.

    Do not instantiate; the constructor does not check that your data is well
    defined. Use the ``homomorphism`` function instead:

    >>> from sympy import QQ
    >>> from sympy.abc import x
    >>> from sympy.polys.agca import homomorphism

    >>> F = QQ.old_poly_ring(x).free_module(2)
    >>> homomorphism(F, F, [[1, 0], [0, 1]])
    Matrix([
    [1, 0], : QQ[x]**2 -> QQ[x]**2
    [0, 1]])
    """
    def _apply(self, elem):
        """Apply ``self`` to ``elem`` by linearity in the generators."""
        if isinstance(self.domain, QuotientModule):
            # Work with the underlying representative of a quotient element.
            elem = elem.data
        return sum(x * e for x, e in zip(elem, self.matrix))

    def _image(self):
        """The image is generated by the images of the free generators."""
        return self.codomain.submodule(*self.matrix)

    def _kernel(self):
        # The domain is either a free module or a quotient thereof.
        # It does not matter if it is a quotient, because that won't increase
        # the kernel.
        # Our generators {e_i} are sent to the matrix entries {b_i}.
        # The kernel is essentially the syzygy module of these {b_i}.
        syz = self.image().syzygy_module()
        return self.domain.submodule(*syz.gens)
class SubModuleHomomorphism(MatrixHomomorphism):
    """
    Concrete class for homomorphism with domain a submodule of a free module
    or a quotient thereof.

    Do not instantiate; the constructor does not check that your data is well
    defined. Use the ``homomorphism`` function instead:

    >>> from sympy import QQ
    >>> from sympy.abc import x
    >>> from sympy.polys.agca import homomorphism

    >>> M = QQ.old_poly_ring(x).free_module(2)*x
    >>> homomorphism(M, M, [[1, 0], [0, 1]])
    Matrix([
    [1, 0], : <[x, 0], [0, x]> -> <[x, 0], [0, x]>
    [0, 1]])
    """
    def _apply(self, elem):
        """Apply ``self`` to ``elem`` by linearity in the ambient generators."""
        if isinstance(self.domain, SubQuotientModule):
            # Work with the underlying representative of a quotient element.
            elem = elem.data
        return sum(x * e for x, e in zip(elem, self.matrix))

    def _image(self):
        """The image is generated by the images of the domain generators."""
        return self.codomain.submodule(*[self(x) for x in self.domain.gens])

    def _kernel(self):
        # Solve the syzygies of the generator images, then pull each
        # solution back to the corresponding combination of domain
        # generators.
        syz = self.image().syzygy_module()
        return self.domain.submodule(
            *[sum(xi*gi for xi, gi in zip(s, self.domain.gens))
              for s in syz.gens])
def homomorphism(domain, codomain, matrix):
    r"""
    Create a homomorphism object.

    This function tries to build a homomorphism from ``domain`` to ``codomain``
    via the matrix ``matrix``.

    Examples
    ========

    >>> from sympy import QQ
    >>> from sympy.abc import x
    >>> from sympy.polys.agca import homomorphism

    >>> R = QQ.old_poly_ring(x)
    >>> T = R.free_module(2)

    If ``domain`` is a free module generated by `e_1, \dots, e_n`, then
    ``matrix`` should be an n-element iterable `(b_1, \dots, b_n)` where
    the `b_i` are elements of ``codomain``. The constructed homomorphism is the
    unique homomorphism sending `e_i` to `b_i`.

    >>> F = R.free_module(2)
    >>> h = homomorphism(F, T, [[1, x], [x**2, 0]])
    >>> h
    Matrix([
    [1, x**2], : QQ[x]**2 -> QQ[x]**2
    [x, 0]])
    >>> h([1, 0])
    [1, x]
    >>> h([0, 1])
    [x**2, 0]
    >>> h([1, 1])
    [x**2 + 1, x]

    If ``domain`` is a submodule of a free module, them ``matrix`` determines
    a homomoprhism from the containing free module to ``codomain``, and the
    homomorphism returned is obtained by restriction to ``domain``.

    >>> S = F.submodule([1, 0], [0, x])
    >>> homomorphism(S, T, [[1, x], [x**2, 0]])
    Matrix([
    [1, x**2], : <[1, 0], [0, x]> -> QQ[x]**2
    [x, 0]])

    If ``domain`` is a (sub)quotient `N/K`, then ``matrix`` determines a
    homomorphism from `N` to ``codomain``. If the kernel contains `K`, this
    homomorphism descends to ``domain`` and is returned; otherwise an exception
    is raised.

    >>> homomorphism(S/[(1, 0)], T, [0, [x**2, 0]])
    Matrix([
    [0, x**2], : <[1, 0] + <[1, 0]>, [0, x] + <[1, 0]>, [1, 0] + <[1, 0]>> -> QQ[x]**2
    [0, 0]])
    >>> homomorphism(S/[(0, x)], T, [0, [x**2, 0]])
    Traceback (most recent call last):
    ...
    ValueError: kernel <[1, 0], [0, 0]> must contain sm, got <[0,x]>
    """
    def freepres(module):
        """
        Return a tuple ``(F, S, Q, c)`` where ``F`` is a free module, ``S`` is a
        submodule of ``F``, and ``Q`` a submodule of ``S``, such that
        ``module = S/Q``, and ``c`` is a conversion function.
        """
        if isinstance(module, FreeModule):
            # A free module is its own presentation with trivial quotient.
            return module, module, module.submodule(), lambda x: module.convert(x)
        if isinstance(module, QuotientModule):
            # F/K: present as (F, F, K); convert via the representative data.
            return (module.base, module.base, module.killed_module,
                    lambda x: module.convert(x).data)
        if isinstance(module, SubQuotientModule):
            # S/K inside a free container: present as (container, S, K).
            return (module.base.container, module.base, module.killed_module,
                    lambda x: module.container.convert(x).data)
        # an ordinary submodule
        return (module.container, module, module.submodule(),
                lambda x: module.container.convert(x))

    SF, SS, SQ, _ = freepres(domain)
    TF, TS, TQ, c = freepres(codomain)
    # Build the homomorphism on the free ambients, then cut it down to the
    # requested domain/codomain presentations.
    # NOTE this is probably a bit inefficient (redundant checks)
    return FreeModuleHomomorphism(SF, TF, [c(x) for x in matrix]
        ).restrict_domain(SS).restrict_codomain(TS
        ).quotient_codomain(TQ).quotient_domain(SQ)
|
sublime1809/django | refs/heads/master | django/contrib/gis/db/models/sql/query.py | 7 | from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.constants import ALL_TERMS
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import GeomField
class GeoQuery(sql.Query):
    """
    A single spatial SQL query.

    Extends Django's base SQL ``Query`` with the extra state and aggregate
    handling needed by GeoQuerySet.
    """
    # Overriding the valid query terms.
    query_terms = ALL_TERMS
    aggregates_module = gis_aggregates

    compiler = 'GeoSQLCompiler'

    #### Methods overridden from the base Query class ####
    def __init__(self, model):
        super(GeoQuery, self).__init__(model)
        # The following attributes are customized for the GeoQuerySet.
        # The SpatialBackend classes contain backend-specific routines and functions.
        self.custom_select = {}
        self.transformed_srid = None
        self.extra_select_fields = {}

    def clone(self, *args, **kwargs):
        """Copy the GIS-specific attributes onto the cloned query."""
        obj = super(GeoQuery, self).clone(*args, **kwargs)
        # Customized selection dictionary and transformed srid flag have
        # to also be added to obj.
        obj.custom_select = self.custom_select.copy()
        obj.transformed_srid = self.transformed_srid
        obj.extra_select_fields = self.extra_select_fields.copy()
        return obj

    def get_aggregation(self, using, force_subq=False):
        """Register geometry conversion fields for geo aggregates."""
        # Remove any aggregates marked for reduction from the subquery
        # and move them to the outer AggregateQuery.
        connection = connections[using]
        for alias, aggregate in self.aggregate_select.items():
            if isinstance(aggregate, gis_aggregates.GeoAggregate):
                # Extent aggregates need no geometry conversion, except on
                # Oracle.
                if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
                    self.extra_select_fields[alias] = GeomField()
        return super(GeoQuery, self).get_aggregation(using, force_subq)

    def resolve_aggregate(self, value, aggregate, connection):
        """
        Overridden from the base Query class to handle the conversion of
        GeoAggregate object values (extents and geometries).
        """
        if isinstance(aggregate, self.aggregates_module.GeoAggregate):
            if aggregate.is_extent:
                # Extents come back as bounding boxes; 3D extents use a
                # dedicated backend converter.
                if aggregate.is_extent == '3D':
                    return connection.ops.convert_extent3d(value)
                else:
                    return connection.ops.convert_extent(value)
            else:
                return connection.ops.convert_geom(value, aggregate.source)
        else:
            return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)

    # Private API utilities, subject to change.
    def _geo_field(self, field_name=None):
        """
        Returns the first Geometry field encountered; or specified via the
        `field_name` keyword. The `field_name` may be a string specifying
        the geometry field on this GeoQuery's model, or a lookup string
        to a geometry field via a ForeignKey relation.

        Returns False when no geometry field can be found.
        """
        if field_name is None:
            # Incrementing until the first geographic field is found.
            for fld in self.model._meta.fields:
                if isinstance(fld, GeometryField):
                    return fld
            return False
        else:
            # Otherwise, check by the given field name -- which may be
            # a lookup to a _related_ geographic field.
            return GISLookup._check_geo_field(self.model._meta, field_name)
|
bmazin/ARCONS-pipeline | refs/heads/master | examples/sexigesimalDemo.py | 1 | # A simple demonstration of how to use ephem to deal with segesimal strings
import ephem
import math
raSexigesimal = "15:30:00"
decSexigesimal = "-10:01:00"
ra = ephem.hours(raSexigesimal)
dec = ephem.degrees(decSexigesimal) # oddly enough, "degrees" is in radians
print "ra=",ra
print "dec=",dec
print "ra in decimal degrees is ",ra.real*12/math.pi
print "dec in decimal degrees is ",dec.real*180/math.pi
|
optima-ict/odoo | refs/heads/9.0 | addons/mass_mailing/models/mail_thread.py | 37 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import re
from openerp.addons.mail.models.mail_message import decode
from openerp.addons.mail.models.mail_thread import decode_header
from openerp.osv import osv
_logger = logging.getLogger(__name__)
class MailThread(osv.AbstractModel):
""" Update MailThread to add the feature of bounced emails and replied emails
in message_process. """
_name = 'mail.thread'
_inherit = ['mail.thread']
def message_route_check_bounce(self, cr, uid, message, context=None):
""" Override to verify that the email_to is the bounce alias. If it is the
case, log the bounce, set the parent and related document as bounced and
return False to end the routing process. """
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
# 0. Verify whether this is a bounced email (wrong destination,...) -> use it to collect data, such as dead leads
if bounce_alias and bounce_alias in email_to:
# Bounce regex
# Typical form of bounce is bounce_alias-128-crm.lead-34@domain
# group(1) = the mail ID; group(2) = the model (if any); group(3) = the record ID
bounce_re = re.compile("%s-(\d+)-?([\w.]+)?-?(\d+)?" % re.escape(bounce_alias), re.UNICODE)
bounce_match = bounce_re.search(email_to)
if bounce_match:
bounced_model, bounced_thread_id = None, False
bounced_mail_id = bounce_match.group(1)
stat_ids = self.pool['mail.mail.statistics'].set_bounced(cr, uid, mail_mail_ids=[bounced_mail_id], context=context)
for stat in self.pool['mail.mail.statistics'].browse(cr, uid, stat_ids, context=context):
bounced_model = stat.model
bounced_thread_id = stat.res_id
_logger.info('Routing mail from %s to %s with Message-Id %s: bounced mail from mail %s, model: %s, thread_id: %s',
email_from, email_to, message_id, bounced_mail_id, bounced_model, bounced_thread_id)
if bounced_model and bounced_model in self.pool and hasattr(self.pool[bounced_model], 'message_receive_bounce') and bounced_thread_id:
self.pool[bounced_model].message_receive_bounce(cr, uid, [bounced_thread_id], mail_id=bounced_mail_id, context=context)
return False
return True
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
if not self.message_route_check_bounce(cr, uid, message, context=context):
return []
return super(MailThread, self).message_route(cr, uid, message, message_dict, model, thread_id, custom_values, context)
def message_receive_bounce(self, cr, uid, ids, mail_id=None, context=None):
"""Called by ``message_process`` when a bounce email (such as Undelivered
Mail Returned to Sender) is received for an existing thread. The default
behavior is to check is an integer ``message_bounce`` column exists.
If it is the case, its content is incremented. """
if 'message_bounce' in self._fields:
for obj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [obj.id], {'message_bounce': obj.message_bounce + 1}, context=context)
    def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
        """ Override to update the parent mail statistics. The parent is found
        by using the References header of the incoming message and looking for
        matching message_id in mail.mail.statistics. """
        if message.get('References'):
            # The References header may hold several whitespace-separated
            # Message-Ids; mark every matching statistic as replied.
            message_ids = [x.strip() for x in decode(message['References']).split()]
            self.pool['mail.mail.statistics'].set_replied(cr, uid, mail_message_ids=message_ids, context=context)
        return super(MailThread, self).message_route_process(cr, uid, message, message_dict, routes, context=context)
|
def main(request, response):
    """Render the request's form data as plain text.

    Output format: one ``key=value,`` entry per plain field (sorted), a
    newline, then one ``key=filename:content-type:size,`` entry per file
    upload (sorted).
    """
    def is_file(item):
        # File uploads expose a ``filename`` attribute on the field value.
        return hasattr(item[1][0], "filename")

    parts = []
    for key, values in sorted(i for i in request.POST.items() if not is_file(i)):
        parts.append("%s=%s," % (key, values[0]))
    parts.append("\n")
    for key, values in sorted(i for i in request.POST.items() if is_file(i)):
        upload = values[0]
        parts.append("%s=%s:%s:%s," % (key,
                                       upload.filename,
                                       upload.headers["Content-Type"],
                                       len(upload.file.read())))
    return "".join(parts)
|
hoburg/gpkit | refs/heads/master | docs/source/examples/sub_multi_values.py | 1 | "Example substitution; adapted from t_sub.py/t_NomialSubs/test_Vector"
from gpkit import Variable, VectorVariable
# Scalar decision variables.
x = Variable("x")
y = Variable("y")
# A 2-element vector variable.
z = VectorVariable(2, "z")
p = x*y*z

# Substitution keys may be variable objects or their names ("y" refers to y).
assert all(p.sub({x: 1, "y": 2}) == 2*z)
# Vector variables accept list substitutions, applied elementwise.
assert all(p.sub({x: 1, y: 2, "z": [1, 2]}) == z.sub({z: [2, 4]}))
|
iansprice/wagtail | refs/heads/master | wagtail/wagtailredirects/permissions.py | 9 | from __future__ import absolute_import, unicode_literals
from wagtail.wagtailcore.permission_policies import ModelPermissionPolicy
from wagtail.wagtailredirects.models import Redirect
permission_policy = ModelPermissionPolicy(Redirect)
|
boshnivolo/TIY-Assignments | refs/heads/master | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/analyzer.py | 294 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following is output:
error: only supplied if there is an error.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
build_targets: minimal set of targets that directly depend on the changed
files and need to be built. The expectation is this set of targets is passed
into a build step.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case targets and build_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
# Set to True for verbose tracing of matching decisions.
debug = False

# Status strings reported through the 'status' output key.
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'

# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'

# MatchStatus is used indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4

generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()

generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
# Directory-like variables get a recognizable placeholder ('!!!') so that
# sources referencing them can be filtered out (see _AddSources).
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  generator_default_variables[dirname] = '!!!'

# The remaining generator variables are irrelevant here; map them to ''.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|. Each
  source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Additionally each source may contain variables.
  Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked in some other means."""
  # NOTE: gyp paths are always posix style.
  for source in sources:
    # '!!!' is the placeholder this generator maps directory variables to
    # (see module setup); '$' marks an unexpanded gyp variable. Both are
    # skipped per the docstring.
    if not len(source) or source.startswith('!!!') or source.startswith('$'):
      continue
    # variable expansion may lead to //.
    org_source = source
    source = source[0] + source[1:].replace('//', '/')
    if source.startswith('../'):
      source = _ResolveParent(source, base_path_components)
      # _ResolveParent returns '' for paths outside the source tree.
      if len(source):
        result.append(source)
      continue
    result.append(base_path + source)
    if debug:
      print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
                              results):
  """Adds the 'inputs' of |action| (an action or rule dict) to |results|."""
  inputs = action.get('inputs')
  if inputs:
    _AddSources(inputs, base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
  """Returns the list of source files of |target_dict| (its 'sources' plus
  the inputs of its actions and rules), as posix paths relative to
  |toplevel_dir|."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative to
  # |toplevel_dir_|. This is done to make it easy to build source paths.
  base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
  base_path_components = base_path.split('/')

  # Add a trailing '/' so that _AddSources() can easily build paths.
  if len(base_path):
    base_path += '/'

  if debug:
    print 'ExtractSources', target, base_path

  results = []
  if 'sources' in target_dict:
    _AddSources(target_dict['sources'], base_path, base_path_components,
                results)
  # Include the inputs from any actions. Any changes to these affect the
  # resulting output.
  if 'actions' in target_dict:
    for action in target_dict['actions']:
      _ExtractSourcesFromAction(action, base_path, base_path_components,
                                results)
  if 'rules' in target_dict:
    for rule in target_dict['rules']:
      _ExtractSourcesFromAction(rule, base_path, base_path_components, results)

  return results
class Target(object):
  """Holds information about a particular target:

  deps: set of Targets this Target depends upon. This is not recursive, only
    the direct dependent Targets.
  match_status: one of the MatchStatus values.
  back_deps: set of Targets that have a dependency on this Target.
  visited: used during iteration to indicate whether we've visited this
    target. This is used for two iterations, once in building the set of
    Targets and again in _GetBuildTargets().
  name: fully qualified name of the target.
  requires_build: True if the target type is such that it needs to be built.
    See _DoesTargetTypeRequireBuild for details.
  added_to_compile_targets: used when determining if the target was added to
    the set of targets that needs to be built.
  in_roots: true if this target is a descendant of one of the root nodes.
  is_executable: true if the type of target is executable."""

  def __init__(self, name):
    self.name = name
    # Dependency-graph edges: forward (deps) and reverse (back_deps).
    self.deps = set()
    self.back_deps = set()
    # Matching is resolved lazily, so start undetermined.
    self.match_status = MATCH_STATUS_TBD
    # TODO(sky): I don't like hanging this off Target. This state is specific
    # to certain functions and should be isolated there.
    self.visited = False
    self.requires_build = False
    self.added_to_compile_targets = False
    self.in_roots = False
    self.is_executable = False
class Config(object):
  """Details what we're looking for:
  files: list of files to search for.
  targets: see file description for details."""

  def __init__(self):
    self.files = []
    self.targets = set()

  def Init(self, params):
    """Initializes Config from the config_path generator flag. This is a
    separate method as it raises an exception if there is a parse error.

    Does nothing (leaves the defaults) when no config_path is supplied."""
    generator_flags = params.get('generator_flags', {})
    config_path = generator_flags.get('config_path', None)
    if not config_path:
      return
    try:
      # 'with' guarantees the handle is closed even when json.load raises
      # (the previous code leaked the file object on a parse error).
      with open(config_path, 'r') as f:
        config = json.load(f)
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      # Include a separator so the path and the parse error don't run
      # together in the message.
      raise Exception('Unable to parse config file ' + config_path + ': ' +
                      str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    self.targets = set(config.get('targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
  """Returns true if the build file |build_file| is either in |files| or
  one of the files included by |build_file| is in |files|. |toplevel_dir| is
  the root of the source tree."""
  if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
    if debug:
      print 'gyp file modified', build_file
    return True

  # First element of included_files is the file itself.
  if len(data[build_file]['included_files']) <= 1:
    return False

  for include_file in data[build_file]['included_files'][1:]:
    # |included_files| are relative to the directory of the |build_file|.
    rel_include_file = \
        _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
    if _ToLocalPath(toplevel_dir, rel_include_file) in files:
      if debug:
        print 'included gyp file modified, gyp_file=', build_file, \
            'included file=', rel_include_file
      return True
  return False
def _GetOrCreateTargetByName(targets, target_name):
  """Creates or returns the Target at targets[target_name]. If there is no
  Target for |target_name| one is created. Returns a tuple of whether a new
  Target was created and the Target."""
  existing = targets.get(target_name)
  if existing is not None:
    return False, existing
  created = Target(target_name)
  targets[target_name] = created
  return True, created
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return target_dict['type'] != 'none' or \
target_dict.get('actions') or target_dict.get('rules')
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
  """Returns a tuple of the following:
  . A dictionary mapping from fully qualified name to Target.
  . A list of the targets that have a source file in |files|.
  . Set of root Targets reachable from the files |build_files|.

  This sets the |match_status| of the targets that contain any of the source
  files in |files| to MATCH_STATUS_MATCHES.
  |toplevel_dir| is the root of the source tree."""
  # Maps from target name to Target.
  targets = {}

  # Targets that matched.
  matching_targets = []

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  # Maps from build file to a boolean indicating whether the build file is in
  # |files|.
  build_file_in_files = {}

  # Root targets across all files (targets nothing visited so far depends on).
  roots = set()

  # Set of Targets in |build_files|.
  build_file_targets = set()

  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    created_target, target = _GetOrCreateTargetByName(targets, target_name)
    if created_target:
      roots.add(target)
    elif target.visited:
      continue

    target.visited = True
    target.requires_build = _DoesTargetTypeRequireBuild(
        target_dicts[target_name])
    target.is_executable = target_dicts[target_name]['type'] == 'executable'

    # Cache the (possibly expensive) modified check per build file.
    build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
    if not build_file in build_file_in_files:
      build_file_in_files[build_file] = \
          _WasBuildFileModified(build_file, data, files, toplevel_dir)

    if build_file in build_files:
      build_file_targets.add(target)

    # If a build file (or any of its included files) is modified we assume all
    # targets in the file are modified.
    if build_file_in_files[build_file]:
      print 'matching target from modified build file', target_name
      target.match_status = MATCH_STATUS_MATCHES
      matching_targets.append(target)
    else:
      sources = _ExtractSources(target_name, target_dicts[target_name],
                                toplevel_dir)
      for source in sources:
        if source in files:
          print 'target', target_name, 'matches', source
          target.match_status = MATCH_STATUS_MATCHES
          matching_targets.append(target)
          break

    # Add dependencies to visit as well as updating back pointers for deps.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets_to_visit.append(dep)

      created_dep_target, dep_target = _GetOrCreateTargetByName(targets, dep)
      if not created_dep_target:
        # Something depends on it, so it can no longer be a root.
        roots.discard(dep_target)

      target.deps.add(dep_target)
      dep_target.back_deps.add(target)

  return targets, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
  """Returns a mapping (dictionary) from unqualified name to Target for all
  the Targets in |to_find|."""
  result = {}
  if not to_find:
    return result
  remaining = set(to_find)
  for qualified_name in all_targets.keys():
    parts = gyp.common.ParseQualifiedTarget(qualified_name)
    if len(parts) > 1 and parts[1] in remaining:
      remaining.remove(parts[1])
      result[parts[1]] = all_targets[qualified_name]
    # Stop early once every requested name has been resolved.
    if not remaining:
      return result
  return result
def _DoesTargetDependOn(target):
  """Returns true if |target| or any of its dependencies matches the supplied
  set of paths. Memoizes the answer in each visited Target's |match_status|.

  target: the Target to look for."""
  status = target.match_status
  if status == MATCH_STATUS_DOESNT_MATCH:
    return False
  if status in (MATCH_STATUS_MATCHES, MATCH_STATUS_MATCHES_BY_DEPENDENCY):
    return True
  # Status is TBD: resolve it by recursing into the dependencies.
  for dependency in target.deps:
    if _DoesTargetDependOn(dependency):
      target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
      return True
  target.match_status = MATCH_STATUS_DOESNT_MATCH
  return False
def _GetTargetsDependingOn(possible_targets):
  """Returns the list of Targets in |possible_targets| that depend (either
  directly or indirectly) on the matched targets.

  possible_targets: targets to search from."""
  return [candidate for candidate in possible_targets
          if _DoesTargetDependOn(candidate)]
def _AddBuildTargets(target, roots, add_if_no_ancestor, result):
"""Recurses through all targets that depend on |target|, adding all targets
that need to be built (and are in |roots|) to |result|.
roots: set of root targets.
add_if_no_ancestor: If true and there are no ancestors of |target| then add
|target| to |result|. |target| must still be in |roots|.
result: targets that need to be built are added here."""
if target.visited:
return
target.visited = True
target.in_roots = not target.back_deps and target in roots
for back_dep_target in target.back_deps:
_AddBuildTargets(back_dep_target, roots, False, result)
target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
target.in_roots |= back_dep_target.in_roots
# Always add 'executable' targets. Even though they may be built by other
# targets that depend upon them it makes detection of what is going to be
# built easier.
if target.in_roots and \
(target.is_executable or
(not target.added_to_compile_targets and
(add_if_no_ancestor or target.requires_build))):
result.add(target)
target.added_to_compile_targets = True
def _GetBuildTargets(matching_targets, roots):
  """Returns the set of Targets that require a build.

  matching_targets: targets that changed and need to be built.
  roots: set of root targets in the build files to search from."""
  required = set()
  for changed_target in matching_targets:
    _AddBuildTargets(changed_target, roots, True, required)
  return required
def _WriteOutput(params, **values):
  """Writes the output, either to stdout or a file is specified.

  A human-readable summary is always printed; the JSON form of |values| goes
  to stdout unless the analyzer_output_path generator flag names a file."""
  if 'error' in values:
    print 'Error:', values['error']
  if 'status' in values:
    print values['status']
  if 'targets' in values:
    values['targets'].sort()
    print 'Supplied targets that depend on changed files:'
    for target in values['targets']:
      print '\t', target
  if 'invalid_targets' in values:
    values['invalid_targets'].sort()
    print 'The following targets were not found:'
    for target in values['invalid_targets']:
      print '\t', target
  if 'build_targets' in values:
    values['build_targets'].sort()
    print 'Targets that require a build:'
    for target in values['build_targets']:
      print '\t', target

  output_path = params.get('generator_flags', {}).get(
      'analyzer_output_path', None)
  if not output_path:
    print json.dumps(values)
    return
  try:
    f = open(output_path, 'w')
    f.write(json.dumps(values) + '\n')
    f.close()
  except IOError as e:
    # Writing the result is best-effort; report the failure and carry on.
    print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
  """Returns true if one of the files in |files| is in the set of included
  files (the -I includes passed to gyp). A modified global include means
  every target may be affected."""
  if params['options'].includes:
    for include in params['options'].includes:
      if _ToGypPath(include) in files:
        print 'Include file modified, assuming all changed', include
        return True
  return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two assignments are function-local and never read
    # here — presumably kept to mirror the other generators; confirm before
    # removing.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results."""
  config = Config()
  try:
    config.Init(params)
    if not config.files:
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')

    toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
    if debug:
      print 'toplevel_dir', toplevel_dir

    # A modified global include file invalidates everything: short-circuit
    # and report that all supplied targets changed.
    if _WasGypIncludeFileModified(params, config.files):
      result_dict = { 'status': all_changed_string,
                      'targets': list(config.targets) }
      _WriteOutput(params, **result_dict)
      return

    all_targets, matching_targets, roots = _GenerateTargets(
        data, target_list, target_dicts, toplevel_dir, frozenset(config.files),
        params['build_files'])

    unqualified_mapping = _GetUnqualifiedToTargetMapping(all_targets,
                                                         config.targets)
    invalid_targets = None
    if len(unqualified_mapping) != len(config.targets):
      invalid_targets = _NamesNotIn(config.targets, unqualified_mapping)

    if matching_targets:
      search_targets = _LookupTargets(config.targets, unqualified_mapping)
      matched_search_targets = _GetTargetsDependingOn(search_targets)
      # Reset the visited status for _GetBuildTargets.
      for target in all_targets.itervalues():
        target.visited = False
      build_targets = _GetBuildTargets(matching_targets, roots)
      # Report unqualified names to the caller.
      matched_search_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                                for target in matched_search_targets]
      build_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                       for target in build_targets]
    else:
      matched_search_targets = []
      build_targets = []

    result_dict = { 'targets': matched_search_targets,
                    'status': found_dependency_string if matching_targets else
                              no_dependency_string,
                    'build_targets': build_targets}
    if invalid_targets:
      result_dict['invalid_targets'] = invalid_targets
    _WriteOutput(params, **result_dict)

  except Exception as e:
    # Any failure is reported through the 'error' output key rather than
    # crashing the generator.
    _WriteOutput(params, error=str(e))
|
uber/pyro | refs/heads/dev | examples/vae/utils/custom_mlp.py | 1 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from inspect import isclass
import torch
import torch.nn as nn
from pyro.distributions.util import broadcast_shape
class Exp(nn.Module):
    """
    a custom module that applies elementwise exponentiation to its input
    """

    def __init__(self):
        super().__init__()

    def forward(self, val):
        # elementwise e**val
        return val.exp()
class ConcatModule(nn.Module):
    """
    a custom module that concatenates its tensor arguments along the last
    dimension, optionally broadcasting their leading shapes first
    """

    def __init__(self, allow_broadcast=False):
        self.allow_broadcast = allow_broadcast
        super().__init__()

    def forward(self, *input_args):
        # unwrap a solitary argument: it may be a tensor or a sequence
        if len(input_args) == 1:
            input_args = input_args[0]
        # a lone tensor needs no concatenation -- return it untouched
        if torch.is_tensor(input_args):
            return input_args
        if self.allow_broadcast:
            # expand every tensor's leading (non-feature) dims to a
            # common broadcast shape before concatenating
            shape = broadcast_shape(*[s.shape[:-1] for s in input_args]) + (-1,)
            input_args = [s.expand(shape) for s in input_args]
        return torch.cat(input_args, dim=-1)
class ListOutModule(nn.ModuleList):
    """
    a custom module list whose forward applies every contained module to the
    same arguments and returns the list of their outputs
    """

    def __init__(self, modules):
        super().__init__(modules)

    def forward(self, *args, **kwargs):
        outputs = []
        for submodule in self:
            outputs.append(submodule.forward(*args, **kwargs))
        return outputs
def call_nn_op(op):
    """
    a helper function that adds appropriate parameters when calling
    an nn module representing an operation like Softmax

    :param op: the nn.Module operation to instantiate
    :return: instantiation of the op module with appropriate parameters
    """
    # softmax-style ops require an explicit dimension; everything else is
    # constructed without arguments
    needs_dim = op in [nn.Softmax, nn.LogSoftmax]
    return op(dim=1) if needs_dim else op()
class MLP(nn.Module):
    """Configurable multi-layer perceptron.

    Builds an nn.Sequential of Linear(+activation) layers from ``mlp_sizes``
    ([input, hidden..., output]). Multiple input tensors are concatenated
    (optionally broadcast) by a leading ConcatModule; when ``output_size``
    is a list/tuple, the output is a list of tensors (one head per entry)
    via a trailing ListOutModule.

    :param mlp_sizes: sizes [input, hidden..., output]; input/output may be
        an int or a list/tuple of ints (multiple inputs/outputs)
    :param activation: activation class applied after each hidden layer
    :param output_activation: class or instance (or list thereof, one per
        output head) applied to the output layer(s); None for linear output
    :param post_layer_fct: hook(layer_ix, total, layer) -> module or None,
        inserted after each hidden Linear (e.g. batch norm)
    :param post_act_fct: same, inserted after each hidden activation
    :param allow_broadcast: broadcast leading dims when concatenating inputs
    :param use_cuda: wrap hidden Linear layers in nn.DataParallel
    """
    def __init__(self, mlp_sizes, activation=nn.ReLU, output_activation=None,
                 post_layer_fct=lambda layer_ix, total_layers, layer: None,
                 post_act_fct=lambda layer_ix, total_layers, layer: None,
                 allow_broadcast=False, use_cuda=False):
        # init the module object
        super().__init__()

        assert len(mlp_sizes) >= 2, "Must have input and output layer sizes defined"

        # get our inputs, outputs, and hidden
        input_size, hidden_sizes, output_size = mlp_sizes[0], mlp_sizes[1:-1], mlp_sizes[-1]

        # assume int or list
        assert isinstance(input_size, (int, list, tuple)), "input_size must be int, list, tuple"

        # everything in MLP will be concatted if it's multiple arguments
        last_layer_size = input_size if type(input_size) == int else sum(input_size)

        # everything sent in will be concatted together by default
        all_modules = [ConcatModule(allow_broadcast)]

        # loop over l
        for layer_ix, layer_size in enumerate(hidden_sizes):
            assert type(layer_size) == int, "Hidden layer sizes must be ints"

            # get our nn layer module (in this case nn.Linear by default)
            cur_linear_layer = nn.Linear(last_layer_size, layer_size)

            # for numerical stability -- initialize the layer properly
            cur_linear_layer.weight.data.normal_(0, 0.001)
            cur_linear_layer.bias.data.normal_(0, 0.001)

            # use GPUs to share data during training (if available)
            if use_cuda:
                cur_linear_layer = nn.DataParallel(cur_linear_layer)

            # add our linear layer
            all_modules.append(cur_linear_layer)

            # handle post_linear
            post_linear = post_layer_fct(layer_ix + 1, len(hidden_sizes), all_modules[-1])

            # if we send something back, add it to sequential
            # here we could return a batch norm for example
            if post_linear is not None:
                all_modules.append(post_linear)

            # handle activation (assumed no params -- deal with that later)
            all_modules.append(activation())

            # now handle after activation
            post_activation = post_act_fct(layer_ix + 1, len(hidden_sizes), all_modules[-1])

            # handle post_activation if not null
            # could add batch norm for example
            if post_activation is not None:
                all_modules.append(post_activation)

            # save the layer size we just created
            last_layer_size = layer_size

        # now we have all of our hidden layers
        # we handle outputs
        assert isinstance(output_size, (int, list, tuple)), "output_size must be int, list, tuple"

        if type(output_size) == int:
            all_modules.append(nn.Linear(last_layer_size, output_size))
            if output_activation is not None:
                all_modules.append(call_nn_op(output_activation)
                                   if isclass(output_activation) else output_activation)
        else:
            # we're going to have a bunch of separate layers we can spit out (a tuple of outputs)
            out_layers = []

            # multiple outputs? handle separately
            for out_ix, out_size in enumerate(output_size):
                # for a single output object, we create a linear layer and some weights
                split_layer = []

                # we have an activation function
                split_layer.append(nn.Linear(last_layer_size, out_size))

                # then we get our output activation (either we repeat all or we index into a same sized array)
                act_out_fct = output_activation if not isinstance(output_activation, (list, tuple)) \
                    else output_activation[out_ix]

                if(act_out_fct):
                    # we check if it's a class. if so, instantiate the object
                    # otherwise, use the object directly (e.g. pre-instaniated)
                    split_layer.append(call_nn_op(act_out_fct)
                                       if isclass(act_out_fct) else act_out_fct)

                # our outputs is just a sequential of the two
                out_layers.append(nn.Sequential(*split_layer))
            all_modules.append(ListOutModule(out_layers))

        # now we have all of our modules, we're ready to build our sequential!
        # process mlps in order, pretty standard here
        self.sequential_mlp = nn.Sequential(*all_modules)

    # pass through our sequential for the output!
    def forward(self, *args, **kwargs):
        """Apply the network to the (possibly multiple) input tensors."""
        return self.sequential_mlp.forward(*args, **kwargs)
|
cherusk/ansible | refs/heads/devel | lib/ansible/plugins/terminal/vyos.py | 41 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
    """Terminal plugin for VyOS devices: prompt/error detection and shell
    setup for the network CLI connection."""

    # Patterns that match the device prompt at the end of output (ends in
    # '>' or '#', or a user@host:path style prompt).
    terminal_stdout_re = [
        re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(r"\@[\w\-\.]+:\S+?[>#\$] ?$")
    ]

    # Patterns that indicate the command failed on the device.
    terminal_stderr_re = [
        re.compile(r"\n\s*Invalid command:"),
        re.compile(r"\nCommit failed"),
        re.compile(r"\n\s+Set failed"),
    ]

    # NOTE(review): os.getenv returns a str when the variable is set but the
    # default here is an int; both work with the '%s' format below.
    terminal_length = os.getenv('ANSIBLE_VYOS_TERMINAL_LENGTH', 10000)

    def on_open_shell(self):
        """Configure the remote terminal length right after the shell opens
        so long command output is not paginated."""
        try:
            self._exec_cli_command('set terminal length 0')
            self._exec_cli_command('set terminal length %s' % self.terminal_length)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
|
ostrovok-team/suds | refs/heads/master | suds/transport/options.py | 201 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for transport options.
"""
from suds.transport import *
from suds.properties import *
class Options(Skin):
    """
    Options:
        - B{proxy} - An http proxy to be specified on requests.
             The proxy is defined as {protocol:proxy,}
                - I{str} B{http} - The I{http} protocol proxy URL.
                - I{str} B{https} - The I{https} protocol proxy URL.
            - type: I{dict}
            - default: {}
        - B{timeout} - Set the url open timeout (seconds).
            - type: I{float}
            - default: 90
        - B{headers} - Extra HTTP headers.
            - type: I{dict}
            - default: {}
        - B{username} - The username used for http authentication.
            - type: I{str}
            - default: None
        - B{password} - The password used for http authentication.
            - type: I{str}
            - default: None
    """
    def __init__(self, **kwargs):
        """Create the option set; keyword arguments override the defaults
        declared in the definitions below."""
        # The property domain scopes these options to this module.
        domain = __name__
        definitions = [
            Definition('proxy', dict, {}),
            Definition('timeout', (int,float), 90),
            Definition('headers', dict, {}),
            # NOTE: basestring is Python 2 only.
            Definition('username', basestring, None),
            Definition('password', basestring, None),
        ]
        Skin.__init__(self, domain, definitions, kwargs)
mbauskar/internal-hr | refs/heads/develop | erpnext/accounts/doctype/pos_setting/pos_setting.py | 42 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import cint
from frappe.model.document import Document
class POSSetting(Document):
    """Per-user (or company-global) Point of Sale configuration."""

    def validate(self):
        """Run all POS Setting validations."""
        self.check_for_duplicate()
        self.validate_expense_account()
        self.validate_all_link_fields()

    def check_for_duplicate(self):
        """Disallow a second POS Setting for the same user/company pair, or
        a second global (user-less) setting for the same company."""
        res = frappe.db.sql("""select name, user from `tabPOS Setting`
            where ifnull(user, '') = %s and name != %s and company = %s""",
            (self.user, self.name, self.company))
        if res:
            if res[0][1]:
                msgprint(_("POS Setting {0} already created for user: {1} and company {2}").format(res[0][0],
                    res[0][1], self.company), raise_exception=1)
            else:
                msgprint(_("Global POS Setting {0} already created for company {1}").format(res[0][0],
                    self.company), raise_exception=1)

    def validate_expense_account(self):
        """Expense Account is mandatory when perpetual inventory accounting
        (auto_accounting_for_stock) is enabled."""
        if cint(frappe.defaults.get_global_default("auto_accounting_for_stock")) \
                and not self.expense_account:
            msgprint(_("Expense Account is mandatory"), raise_exception=1)

    def validate_all_link_fields(self):
        """Ensure every linked Account / Cost Center / Warehouse belongs to
        this setting's company."""
        accounts = {"Account": [self.cash_bank_account, self.income_account,
            self.expense_account], "Cost Center": [self.cost_center],
            "Warehouse": [self.warehouse]}

        for link_dt, dn_list in accounts.items():
            for link_dn in dn_list:
                if link_dn and not frappe.db.exists({"doctype": link_dt,
                        "company": self.company, "name": link_dn}):
                    frappe.throw(_("{0} does not belong to Company {1}").format(link_dn, self.company))

    def on_update(self):
        self.set_defaults()

    def on_trash(self):
        # Recompute defaults as if this setting no longer existed.
        self.set_defaults(include_current_pos=False)

    def set_defaults(self, include_current_pos=True):
        """Recompute the 'is_pos' default for each user (or globally) based
        on the POS Settings that exist.

        :param include_current_pos: when False, this document is excluded
            from the query (used from on_trash, before the row is deleted).
        """
        frappe.defaults.clear_default("is_pos")

        if not include_current_pos:
            # Bug fix: the previous code interpolated self.name with
            # name.replace("'", "\'"), which is a no-op ("\'" == "'"), so a
            # name containing a quote broke the SQL (and was injectable).
            # A parameterized query handles quoting correctly.
            pos_view_users = frappe.db.sql_list("""select user
                from `tabPOS Setting` where name != %s""", (self.name,))
        else:
            pos_view_users = frappe.db.sql_list("""select user
                from `tabPOS Setting`""")

        for user in pos_view_users:
            if user:
                frappe.defaults.set_user_default("is_pos", 1, user)
            else:
                frappe.defaults.set_global_default("is_pos", 1)
@frappe.whitelist()
def get_series():
    """Return the naming-series options configured on Sales Invoice,
    or an empty string when none are set."""
    options = frappe.get_meta("Sales Invoice").get_field("naming_series").options
    return options or ""
|
ChinaQuants/PTVS | refs/heads/master | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/shift_jisx0213.py | 61 | #
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# Fetch the C-implemented codec object for SHIFT_JISX0213 from the
# _codecs_jp extension module; every class below just delegates to it.
codec = _codecs_jp.getcodec('shift_jisx0213')

class Codec(codecs.Codec):
    # Stateless one-shot encode/decode provided directly by the C codec.
    encode = codec.encode
    decode = codec.decode

class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec

def getregentry():
    # Registration hook used by the encodings package's codec search
    # function to expose this codec under the name 'shift_jisx0213'.
    return codecs.CodecInfo(
        name='shift_jisx0213',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
cernops/rally | refs/heads/master | tests/hacking/checks.py | 9 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Guidelines for writing new hacking checks
- Use only for Rally specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to tests/unit/test_hacking.py
"""
import functools
import re
import tokenize
# Patterns used by the N3xx checks below; compiled once at import time.

# N320: assertTrue(isinstance(a, b))
re_assert_true_instance = re.compile(
    r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
    r"(\w|\.|\'|\"|\[|\])+\)\)")
# N321: assertEqual(type(A), B)
re_assert_equal_type = re.compile(
    r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
    r"(\w|\.|\'|\"|\[|\])+\)")
# N322: assertEqual(A, None) / assertEqual(None, A)
re_assert_equal_end_with_none = re.compile(r"assertEqual\(.*?,\s+None\)$")
re_assert_equal_start_with_none = re.compile(r"assertEqual\(None,")
# N323: assertTrue/assertFalse(A [not] in B [, msg]) — second variant
# additionally matches bracketed/quoted literals containing spaces.
re_assert_true_false_with_in_or_not_in = re.compile(
    r"assert(True|False)\("
    r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
re_assert_true_false_with_in_or_not_in_spaces = re.compile(
    r"assert(True|False)\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
    r"[\[|'|\"](, .*)?\)")
# N324: assertEqual(A [not] in B, True/False) — either argument order.
re_assert_equal_in_end_with_true_or_false = re.compile(
    r"assertEqual\((\w|[][.'\"])+( not)? in (\w|[][.'\", ])+, (True|False)\)")
re_assert_equal_in_start_with_true_or_false = re.compile(
    r"assertEqual\((True|False), (\w|[][.'\"])+( not)? in (\w|[][.'\", ])+\)")
# N351: dict()/list() constructor calls where a literal would do.
re_no_construct_dict = re.compile(
    r"\sdict\(\)")
re_no_construct_list = re.compile(
    r"\slist\(\)")
# N352: %-formatting that uses a mapping key, e.g. "%(foo)s" % {...}.
# Group 1 captures the mapping key name (verbose-mode regex).
re_str_format = re.compile(r"""
% # start of specifier
\(([^)]+)\) # mapping key, in group 1
[#0 +\-]? # optional conversion flag
(?:-?\d*)? # optional minimum field width
(?:\.\d*)? # optional precision
[hLl]? # optional length modifier
[A-z%] # conversion modifier
""", re.X)
def skip_ignored_lines(func):
    """Decorator for pep8-style checks that skips ignorable lines.

    Wraps a check generator ``func(logical_line, filename)`` and yields
    nothing when the logical line is blank, is a comment, or ends with
    ``# noqa``; otherwise it forwards every error the wrapped check
    produces.
    """
    @functools.wraps(func)
    def wrapper(logical_line, filename):
        line = logical_line.strip()
        if not line or line.startswith("#") or line.endswith("# noqa"):
            return
        # BUG FIX: the previous implementation did
        # ``yield next(func(logical_line, filename))``, which (a) dropped
        # every error after the first and (b) raised StopIteration when the
        # wrapped check yielded nothing — a RuntimeError under PEP 479
        # (Python 3.7+). Re-yield the whole generator instead.
        for result in func(logical_line, filename):
            yield result
    return wrapper
def _parse_assert_mock_str(line):
point = line.find(".assert_")
if point != -1:
end_pos = line[point:].find("(") + point
return point, line[point + 1: end_pos], line[: point]
else:
return None, None, None
@skip_ignored_lines
def check_assert_methods_from_mock(logical_line, filename):
    """Ensure that ``assert_*`` methods from ``mock`` library is used correctly

    N301 - base error number
    N302 - related to nonexistent "assert_called"
    N303 - related to nonexistent "assert_called_once"
    """
    # The only assert_* methods that actually exist on mock objects; any
    # other name silently passes at runtime, hence these checks.
    correct_names = ["assert_any_call", "assert_called_once_with",
                     "assert_called_with", "assert_has_calls"]
    ignored_files = ["./tests/unit/test_hacking.py"]

    # Only meaningful inside the test tree (mock is a test dependency).
    if filename.startswith("./tests") and filename not in ignored_files:
        pos, method_name, obj_name = _parse_assert_mock_str(logical_line)

        if pos:
            if method_name not in correct_names:
                error_number = "N301"  # default; refined below for known typos
                msg = ("%(error_number)s:'%(method)s' is not present in `mock`"
                       " library. %(custom_msg)s For more details, visit "
                       "http://www.voidspace.org.uk/python/mock/ .")

                if method_name == "assert_called":
                    error_number = "N302"
                    custom_msg = ("Maybe, you should try to use "
                                  "'assertTrue(%s.called)' instead." %
                                  obj_name)
                elif method_name == "assert_called_once":
                    # For more details, see a bug in Rally:
                    # https://bugs.launchpad.net/rally/+bug/1305991
                    error_number = "N303"
                    custom_msg = ("Maybe, you should try to use "
                                  "'assertEqual(1, %s.call_count)' "
                                  "or '%s.assert_called_once_with()'"
                                  " instead." % (obj_name, obj_name))
                else:
                    custom_msg = ("Correct 'assert_*' methods: '%s'."
                                  % "', '".join(correct_names))

                yield (pos, msg % {
                    "error_number": error_number,
                    "method": method_name,
                    "custom_msg": custom_msg})
@skip_ignored_lines
def check_import_of_logging(logical_line, filename):
    """Logging must be imported through rally.common.log (N310)."""
    allowed_files = ("./rally/common/log.py", "./tests/unit/test_log.py")
    banned_prefixes = ("from oslo_log",
                       "import oslo_log",
                       "import logging")
    if filename in allowed_files:
        return
    if logical_line.startswith(banned_prefixes):
        yield (0, "N310 Wrong module for logging is imported. Please "
                  "use `rally.common.log` instead.")
@skip_ignored_lines
def no_translate_debug_logs(logical_line, filename):
    """Forbid translated debug-level log messages, i.e. ``LOG.debug(_(`` (N311).

    Per the OpenStack translation policy
    (https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation),
    debug logs are not translated. Assumes 'LOG' is a logger.
    """
    if not logical_line.startswith("LOG.debug(_("):
        return
    yield (0, "N311 Don't translate debug level logs")
@skip_ignored_lines
def no_use_conf_debug_check(logical_line, filename):
    """Forbid direct use of ``cfg.CONF.debug`` (N312).

    Rally distinguishes full-OpenStack DEBUG from Rally-only DEBUG, so
    debug mode must be queried via rally.common.log.is_debug instead.
    """
    if filename == "./rally/common/log.py":
        return
    position = logical_line.find("CONF.debug")
    if position == -1:
        return
    yield (position, "N312 Don't use `CONF.debug`. "
                     "Function `rally.common.log.is_debug` "
                     "should be used instead.")
@skip_ignored_lines
def assert_true_instance(logical_line, filename):
    """Forbid ``assertTrue(isinstance(a, b))``; use assertIsInstance (N320)."""
    if not re_assert_true_instance.match(logical_line):
        return
    yield (0, "N320 assertTrue(isinstance(a, b)) sentences not allowed, "
              "you should use assertIsInstance(a, b) instead.")
@skip_ignored_lines
def assert_equal_type(logical_line, filename):
    """Forbid ``assertEqual(type(A), B)``; use assertIsInstance (N321)."""
    if not re_assert_equal_type.match(logical_line):
        return
    yield (0, "N321 assertEqual(type(A), B) sentences not allowed, "
              "you should use assertIsInstance(a, b) instead.")
@skip_ignored_lines
def assert_equal_none(logical_line, filename):
    """Forbid ``assertEqual(A, None)`` in either order; use assertIsNone (N322)."""
    found = re_assert_equal_start_with_none.search(logical_line)
    if found is None:
        found = re_assert_equal_end_with_none.search(logical_line)
    if found:
        yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) "
                  "sentences not allowed, you should use assertIsNone(A) "
                  "instead.")
@skip_ignored_lines
def assert_true_or_false_with_in(logical_line, filename):
    """Forbid assertTrue/assertFalse of membership tests (N323).

    Matches ``assertTrue/False(A in B)`` and ``assertTrue/False(A not in
    B)`` forms, with or without a trailing message argument.
    """
    found = (re_assert_true_false_with_in_or_not_in.search(logical_line) or
             re_assert_true_false_with_in_or_not_in_spaces.search(logical_line))
    if not found:
        return
    yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not "
              "allowed, you should use assertIn(A, B) or assertNotIn(A, B)"
              " instead.")
@skip_ignored_lines
def assert_equal_in(logical_line, filename):
    """Forbid assertEqual of a membership test against True/False (N324).

    Matches ``assertEqual(A [not] in B, True/False)`` and the reversed
    argument order.
    """
    found = (re_assert_equal_in_end_with_true_or_false.search(logical_line) or
             re_assert_equal_in_start_with_true_or_false.search(logical_line))
    if not found:
        return
    yield (0, "N324: Use assertIn/NotIn(A, B) rather than "
              "assertEqual(A in/not in B, True/False) when checking "
              "collection contents.")
@skip_ignored_lines
def check_no_direct_rally_objects_import(logical_line, filename):
    """rally.common.objects must be imported as a module, not from (N340).

    ``from rally.common import objects`` allows direct use such as
    ``objects.Task``.
    """
    if filename == "./rally/common/objects/__init__.py":
        return
    bad_prefixes = ("from rally.common.objects",
                    "import rally.common.objects.")
    if logical_line.startswith(bad_prefixes):
        yield (0, "N340: Import objects module:"
                  "`from rally.common import objects`. "
                  "After that you can use directly objects e.g. objects.Task")
@skip_ignored_lines
def check_no_oslo_deprecated_import(logical_line, filename):
    """Forbid the deprecated ``oslo.`` namespace packages (N341).

    oslo.foo libraries were renamed to oslo_foo because of namespace
    package problems.
    """
    bad_prefixes = ("from oslo.", "import oslo.")
    if logical_line.startswith(bad_prefixes):
        yield (0, "N341: Import oslo module: `from oslo_xyz import ...`. "
                  "The oslo.xyz namespace was deprecated, use oslo_xyz "
                  "instead")
@skip_ignored_lines
def check_quotes(logical_line, filename):
    """Check that single quotation marks are not used

    N350

    Implemented as a small character scanner so that single quotes inside
    double-quoted strings, triple-quoted strings, and comments are NOT
    reported — only a ' that starts a string literal is.
    """
    # Scanner state: inside a "..." string / inside a \"\"\"...\"\"\" string.
    in_string = False
    in_multiline_string = False
    single_quotas_are_used = False

    # True when line[i], line[i+1], line[i+2] are all `char` (triple quote).
    check_tripple = (
        lambda line, i, char: (
            i + 2 < len(line) and
            (char == line[i] == line[i + 1] == line[i + 2])
        )
    )

    i = 0
    while i < len(logical_line):
        char = logical_line[i]

        if in_string:
            if char == "\"":
                in_string = False
            if char == "\\":
                i += 1  # ignore next char
        elif in_multiline_string:
            if check_tripple(logical_line, i, "\""):
                i += 2  # skip next 2 chars
                in_multiline_string = False
        elif char == "#":
            # Rest of the line is a comment; nothing more to scan.
            break
        elif char == "'":
            # Single quote outside any string: that's the violation.
            single_quotas_are_used = True
            break
        elif char == "\"":
            if check_tripple(logical_line, i, "\""):
                in_multiline_string = True
                i += 3
                continue
            in_string = True
        i += 1

    if single_quotas_are_used:
        # i is the column where scanning stopped (the offending quote).
        yield (i, "N350 Remove Single quotes")
@skip_ignored_lines
def check_no_constructor_data_struct(logical_line, filename):
    """Data structures must be declared with literals, not dict()/list() (N351)."""
    if re_no_construct_dict.search(logical_line):
        yield (0, "N351 Remove dict() construct and use literal {}")
    if re_no_construct_list.search(logical_line):
        yield (0, "N351 Remove list() construct and use literal []")
def check_dict_formatting_in_string(logical_line, tokens):
    """Check that strings do not use dict-formatting with a single replacement

    N352

    Walks the token stream, accumulating adjacent string literals (implicit
    concatenation), and flags a ``%`` applied to a format string that uses
    exactly one mapping key, e.g. ``"%(foo)s" % {"foo": x}`` — positional
    formatting is simpler there.
    """
    # NOTE(stpierre): Can't use @skip_ignored_lines here because it's
    # a stupid decorator that only works on functions that take
    # (logical_line, filename) as arguments.
    if (not logical_line or
            logical_line.startswith("#") or
            logical_line.endswith("# noqa")):
        return

    current_string = ""
    in_string = False
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.STRING:
            if not in_string:
                current_string = ""
                in_string = True
            current_string += text.strip("\"")
        elif token_type == tokenize.OP:
            if not current_string:
                continue
            # NOTE(stpierre): The string formatting operator % has
            # lower precedence than +, so we assume that the logical
            # string has concluded whenever we hit an operator of any
            # sort. (Most operators don't work for strings anyway.)
            # Some string operators do have higher precedence than %,
            # though, so you can technically trick this check by doing
            # things like:
            #
            #     "%(foo)s" * 1 % {"foo": 1}
            #     "%(foo)s"[:] % {"foo": 1}
            #
            # It also will produce false positives if you use explicit
            # parenthesized addition for two strings instead of
            # concatenation by juxtaposition, e.g.:
            #
            #     ("%(foo)s" + "%(bar)s") % vals
            #
            # But if you do any of those things, then you deserve all
            # of the horrible things that happen to you, and probably
            # many more.
            in_string = False
            if text == "%":
                format_keys = set()
                for match in re_str_format.finditer(current_string):
                    format_keys.add(match.group(1))
                if len(format_keys) == 1:
                    # BUG FIX: this check is documented as N352 (see the
                    # docstring) but previously emitted "N353", colliding
                    # with check_using_unicode's error number.
                    yield (0,
                           "N352 Do not use mapping key string formatting "
                           "with a single key")
            if text != ")":
                # NOTE(stpierre): You can have a parenthesized string
                # followed by %, so a closing paren doesn't obviate
                # the possibility for a substitution operator like
                # every other operator does.
                current_string = ""
        elif token_type in (tokenize.NL, tokenize.COMMENT):
            continue
        else:
            in_string = False
            if token_type == tokenize.NEWLINE:
                current_string = ""
@skip_ignored_lines
def check_using_unicode(logical_line, filename):
    """Forbid the Python-2-only ``unicode()`` builtin (N353)."""
    found = re.search(r"\bunicode\(", logical_line)
    if found:
        yield (0, "N353 'unicode' function is absent in python3. Please "
                  "use 'six.text_type' instead.")
def factory(register):
    """Hook called by the hacking framework to register every local check.

    Registration order matches the N3xx numbering of the checks above.
    """
    all_checks = (
        check_assert_methods_from_mock,
        check_import_of_logging,
        no_translate_debug_logs,
        no_use_conf_debug_check,
        assert_true_instance,
        assert_equal_type,
        assert_equal_none,
        assert_true_or_false_with_in,
        assert_equal_in,
        check_no_direct_rally_objects_import,
        check_no_oslo_deprecated_import,
        check_quotes,
        check_no_constructor_data_struct,
        check_dict_formatting_in_string,
        check_using_unicode,
    )
    for check in all_checks:
        register(check)
|
hermantai/sorno-py-scripts | refs/heads/master | scripts/sorno_java_deps_graph.py | 1 | #!/usr/bin/env python
"""Prints the class dependency graph given a bunch of java source files.
This script depends on the library sorno-py-scripts. You can find out the
installation detail in https://github.com/hermantai/sorno-py-scripts.
Copyright 2016 Heung Ming Tai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import collections
import logging
import os
import re
import sys
from sorno import loggingutil
_log = logging.getLogger()
_plain_logger = None # will be created in main()
_plain_error_logger = None # will be created in main()
class Graph(object):
    """A minimal directed graph: a node set plus adjacency sets."""

    def __init__(self):
        self.nodes = set()  # every node ever added
        self.edges = collections.defaultdict(set)  # src -> set of dsts

    def add_node(self, n):
        """Add *n* to the graph (no-op if already present)."""
        self.nodes.add(n)

    def add_edge(self, src, dst):
        """Add the directed edge src -> dst, registering both endpoints."""
        self.nodes.update((src, dst))
        self.edges[src].add(dst)

    def get_edges(self, n):
        """Return the outgoing edges of *n* as a list of (n, dst) pairs."""
        return [(n, dst) for dst in self.edges[n]]
class App(object):
    """A console application to do work"""

    def __init__(self, args):
        """
        Args:
            args (argparse.Namespace): The flags for the script.
        """
        self.args = args

    def run(self):
        """The entry point of the script.

        Returns:
            int: 0 on success, used as the process exit code.
        """
        m = self.create_classes_to_contents_map(self.args.path)
        g = self.extract_class_dependency_graph(m)
        indegrees = self.get_indegrees(g)
        if self.args.edges:
            # Flat "A -> B" listing, one line per edge.
            for n in g.nodes:
                for _, dst in g.get_edges(n):
                    print(n, "->", dst)
        else:
            self.print_graph(g, indegrees)
        return 0

    def create_classes_to_contents_map(self, paths):
        """Map class name -> file content for every .java file under *paths*.

        Directories are walked recursively; plain files are used as-is.
        """
        m = {}
        for path in paths:
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path):
                    for f in files:
                        self._set_class_to_content(m, os.path.join(root, f))
            else:
                self._set_class_to_content(m, path)
        return m

    def _set_class_to_content(self, classes_to_contents_map, filepath):
        """Record *filepath*'s content under its base name if it is a .java file."""
        name, ext = os.path.splitext(filepath)
        if ext != ".java":
            return
        # BUG FIX: the file handle was previously leaked
        # (open(filepath).read()); close it deterministically.
        with open(filepath) as f:
            classes_to_contents_map[os.path.basename(name)] = f.read()

    def extract_class_dependency_graph(self, classes_to_contents_map):
        """Build a Graph with an edge C -> N when class C's source mentions N."""
        g = Graph()
        for c in classes_to_contents_map:
            g.add_node(c)
        # BUG FIX: dict.iteritems() does not exist on Python 3; items()
        # works on both (this module already imports from __future__).
        for c, content in classes_to_contents_map.items():
            for n in g.nodes:
                if c == n:
                    continue
                # Whole-word match so e.g. "Foo" does not match "FooBar".
                if re.search(r"\b%s\b" % n, content):
                    g.add_edge(c, n)
        return g

    def get_indegrees(self, graph):
        """Return a mapping node -> number of incoming edges (default 0)."""
        indegrees = collections.defaultdict(lambda: 0)
        for n in graph.nodes:
            for _, dst in graph.get_edges(n):
                indegrees[dst] += 1
        return indegrees

    def print_graph(self, graph, indegrees):
        """Print each root (indegree 0) followed by its dependency tree."""
        for n in graph.nodes:
            if indegrees[n]:
                continue
            print(n)
            self.print_edges(graph, n, 4)
            print("-" * 40)
        return

    def print_edges(self, graph, node, indent=0):
        """Recursively print outgoing edges of *node*, one indent level per hop.

        NOTE(review): assumes the dependency graph is acyclic — a cycle
        would recurse forever; confirm inputs or add a visited set.
        """
        for _, dst in graph.get_edges(node):
            print(" " * indent + "->", dst)
            self.print_edges(graph, dst, indent + 4)
def parse_args(cmd_args):
    """Parse the command-line flags.

    Args:
        cmd_args: argument list, e.g. ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with ``debug``, ``edges`` and ``path``.
    """
    # Reuse the module docstring (minus the license tail) as the help text.
    description = __doc__.split("Copyright 2016")[0].strip()
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("--debug", action="store_true")
    parser.add_argument(
        "--edges",
        action="store_true",
        help="Print the edges instead of the graph",
    )
    parser.add_argument("path", nargs="+")
    return parser.parse_args(cmd_args)
def main():
    """Script entry point: parse flags, set up logging, run App, exit with its status."""
    # The plain loggers are created here and published as module globals
    # for the rest of the script to use.
    global _plain_logger, _plain_error_logger

    args = parse_args(sys.argv[1:])  # skip argv[0], the program name

    loggingutil.setup_logger(_log, debug=args.debug)
    _plain_logger = loggingutil.create_plain_logger(
        "PLAIN",
        debug=args.debug,
    )
    _plain_error_logger = loggingutil.create_plain_logger(
        "PLAIN_ERROR",
        debug=args.debug,
        # stdout=False: presumably routes to stderr — confirm in loggingutil.
        stdout=False,
    )

    app = App(args)
    # App.run() returns 0 on success; propagate it as the exit code.
    sys.exit(app.run())

if __name__ == '__main__':
    main()
|
OsirisSPS/osiris-sps | refs/heads/master | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/plat-mac/Carbon/Icons.py | 81 | # Generated from 'Icons.h'
def FOUR_CHAR_CODE(x):
    """Identity helper: the C header's FOUR_CHAR_CODE macro values are
    kept as-is (four-character strings) on the Python side."""
    return x
from Carbon.Files import *
kGenericDocumentIconResource = -4000
kGenericStationeryIconResource = -3985
kGenericEditionFileIconResource = -3989
kGenericApplicationIconResource = -3996
kGenericDeskAccessoryIconResource = -3991
kGenericFolderIconResource = -3999
kPrivateFolderIconResource = -3994
kFloppyIconResource = -3998
kTrashIconResource = -3993
kGenericRAMDiskIconResource = -3988
kGenericCDROMIconResource = -3987
kDesktopIconResource = -3992
kOpenFolderIconResource = -3997
kGenericHardDiskIconResource = -3995
kGenericFileServerIconResource = -3972
kGenericSuitcaseIconResource = -3970
kGenericMoverObjectIconResource = -3969
kGenericPreferencesIconResource = -3971
kGenericQueryDocumentIconResource = -16506
kGenericExtensionIconResource = -16415
kSystemFolderIconResource = -3983
kHelpIconResource = -20271
kAppleMenuFolderIconResource = -3982
genericDocumentIconResource = kGenericDocumentIconResource
genericStationeryIconResource = kGenericStationeryIconResource
genericEditionFileIconResource = kGenericEditionFileIconResource
genericApplicationIconResource = kGenericApplicationIconResource
genericDeskAccessoryIconResource = kGenericDeskAccessoryIconResource
genericFolderIconResource = kGenericFolderIconResource
privateFolderIconResource = kPrivateFolderIconResource
floppyIconResource = kFloppyIconResource
trashIconResource = kTrashIconResource
genericRAMDiskIconResource = kGenericRAMDiskIconResource
genericCDROMIconResource = kGenericCDROMIconResource
desktopIconResource = kDesktopIconResource
openFolderIconResource = kOpenFolderIconResource
genericHardDiskIconResource = kGenericHardDiskIconResource
genericFileServerIconResource = kGenericFileServerIconResource
genericSuitcaseIconResource = kGenericSuitcaseIconResource
genericMoverObjectIconResource = kGenericMoverObjectIconResource
genericPreferencesIconResource = kGenericPreferencesIconResource
genericQueryDocumentIconResource = kGenericQueryDocumentIconResource
genericExtensionIconResource = kGenericExtensionIconResource
systemFolderIconResource = kSystemFolderIconResource
appleMenuFolderIconResource = kAppleMenuFolderIconResource
kStartupFolderIconResource = -3981
kOwnedFolderIconResource = -3980
kDropFolderIconResource = -3979
kSharedFolderIconResource = -3978
kMountedFolderIconResource = -3977
kControlPanelFolderIconResource = -3976
kPrintMonitorFolderIconResource = -3975
kPreferencesFolderIconResource = -3974
kExtensionsFolderIconResource = -3973
kFontsFolderIconResource = -3968
kFullTrashIconResource = -3984
startupFolderIconResource = kStartupFolderIconResource
ownedFolderIconResource = kOwnedFolderIconResource
dropFolderIconResource = kDropFolderIconResource
sharedFolderIconResource = kSharedFolderIconResource
mountedFolderIconResource = kMountedFolderIconResource
controlPanelFolderIconResource = kControlPanelFolderIconResource
printMonitorFolderIconResource = kPrintMonitorFolderIconResource
preferencesFolderIconResource = kPreferencesFolderIconResource
extensionsFolderIconResource = kExtensionsFolderIconResource
fontsFolderIconResource = kFontsFolderIconResource
fullTrashIconResource = kFullTrashIconResource
kThumbnail32BitData = FOUR_CHAR_CODE('it32')
kThumbnail8BitMask = FOUR_CHAR_CODE('t8mk')
kHuge1BitMask = FOUR_CHAR_CODE('ich#')
kHuge4BitData = FOUR_CHAR_CODE('ich4')
kHuge8BitData = FOUR_CHAR_CODE('ich8')
kHuge32BitData = FOUR_CHAR_CODE('ih32')
kHuge8BitMask = FOUR_CHAR_CODE('h8mk')
kLarge1BitMask = FOUR_CHAR_CODE('ICN#')
kLarge4BitData = FOUR_CHAR_CODE('icl4')
kLarge8BitData = FOUR_CHAR_CODE('icl8')
kLarge32BitData = FOUR_CHAR_CODE('il32')
kLarge8BitMask = FOUR_CHAR_CODE('l8mk')
kSmall1BitMask = FOUR_CHAR_CODE('ics#')
kSmall4BitData = FOUR_CHAR_CODE('ics4')
kSmall8BitData = FOUR_CHAR_CODE('ics8')
kSmall32BitData = FOUR_CHAR_CODE('is32')
kSmall8BitMask = FOUR_CHAR_CODE('s8mk')
kMini1BitMask = FOUR_CHAR_CODE('icm#')
kMini4BitData = FOUR_CHAR_CODE('icm4')
kMini8BitData = FOUR_CHAR_CODE('icm8')
kTileIconVariant = FOUR_CHAR_CODE('tile')
kRolloverIconVariant = FOUR_CHAR_CODE('over')
kDropIconVariant = FOUR_CHAR_CODE('drop')
kOpenIconVariant = FOUR_CHAR_CODE('open')
kOpenDropIconVariant = FOUR_CHAR_CODE('odrp')
large1BitMask = kLarge1BitMask
large4BitData = kLarge4BitData
large8BitData = kLarge8BitData
small1BitMask = kSmall1BitMask
small4BitData = kSmall4BitData
small8BitData = kSmall8BitData
mini1BitMask = kMini1BitMask
mini4BitData = kMini4BitData
mini8BitData = kMini8BitData
kAlignNone = 0x00
kAlignVerticalCenter = 0x01
kAlignTop = 0x02
kAlignBottom = 0x03
kAlignHorizontalCenter = 0x04
kAlignAbsoluteCenter = kAlignVerticalCenter | kAlignHorizontalCenter
kAlignCenterTop = kAlignTop | kAlignHorizontalCenter
kAlignCenterBottom = kAlignBottom | kAlignHorizontalCenter
kAlignLeft = 0x08
kAlignCenterLeft = kAlignVerticalCenter | kAlignLeft
kAlignTopLeft = kAlignTop | kAlignLeft
kAlignBottomLeft = kAlignBottom | kAlignLeft
kAlignRight = 0x0C
kAlignCenterRight = kAlignVerticalCenter | kAlignRight
kAlignTopRight = kAlignTop | kAlignRight
kAlignBottomRight = kAlignBottom | kAlignRight
atNone = kAlignNone
atVerticalCenter = kAlignVerticalCenter
atTop = kAlignTop
atBottom = kAlignBottom
atHorizontalCenter = kAlignHorizontalCenter
atAbsoluteCenter = kAlignAbsoluteCenter
atCenterTop = kAlignCenterTop
atCenterBottom = kAlignCenterBottom
atLeft = kAlignLeft
atCenterLeft = kAlignCenterLeft
atTopLeft = kAlignTopLeft
atBottomLeft = kAlignBottomLeft
atRight = kAlignRight
atCenterRight = kAlignCenterRight
atTopRight = kAlignTopRight
atBottomRight = kAlignBottomRight
kTransformNone = 0x00
kTransformDisabled = 0x01
kTransformOffline = 0x02
kTransformOpen = 0x03
kTransformLabel1 = 0x0100
kTransformLabel2 = 0x0200
kTransformLabel3 = 0x0300
kTransformLabel4 = 0x0400
kTransformLabel5 = 0x0500
kTransformLabel6 = 0x0600
kTransformLabel7 = 0x0700
kTransformSelected = 0x4000
kTransformSelectedDisabled = kTransformSelected | kTransformDisabled
kTransformSelectedOffline = kTransformSelected | kTransformOffline
kTransformSelectedOpen = kTransformSelected | kTransformOpen
ttNone = kTransformNone
ttDisabled = kTransformDisabled
ttOffline = kTransformOffline
ttOpen = kTransformOpen
ttLabel1 = kTransformLabel1
ttLabel2 = kTransformLabel2
ttLabel3 = kTransformLabel3
ttLabel4 = kTransformLabel4
ttLabel5 = kTransformLabel5
ttLabel6 = kTransformLabel6
ttLabel7 = kTransformLabel7
ttSelected = kTransformSelected
ttSelectedDisabled = kTransformSelectedDisabled
ttSelectedOffline = kTransformSelectedOffline
ttSelectedOpen = kTransformSelectedOpen
kSelectorLarge1Bit = 0x00000001
kSelectorLarge4Bit = 0x00000002
kSelectorLarge8Bit = 0x00000004
kSelectorLarge32Bit = 0x00000008
kSelectorLarge8BitMask = 0x00000010
kSelectorSmall1Bit = 0x00000100
kSelectorSmall4Bit = 0x00000200
kSelectorSmall8Bit = 0x00000400
kSelectorSmall32Bit = 0x00000800
kSelectorSmall8BitMask = 0x00001000
kSelectorMini1Bit = 0x00010000
kSelectorMini4Bit = 0x00020000
kSelectorMini8Bit = 0x00040000
kSelectorHuge1Bit = 0x01000000
kSelectorHuge4Bit = 0x02000000
kSelectorHuge8Bit = 0x04000000
kSelectorHuge32Bit = 0x08000000
kSelectorHuge8BitMask = 0x10000000
kSelectorAllLargeData = 0x000000FF
kSelectorAllSmallData = 0x0000FF00
kSelectorAllMiniData = 0x00FF0000
# kSelectorAllHugeData = (long)0xFF000000
kSelectorAll1BitData = kSelectorLarge1Bit | kSelectorSmall1Bit | kSelectorMini1Bit | kSelectorHuge1Bit
kSelectorAll4BitData = kSelectorLarge4Bit | kSelectorSmall4Bit | kSelectorMini4Bit | kSelectorHuge4Bit
kSelectorAll8BitData = kSelectorLarge8Bit | kSelectorSmall8Bit | kSelectorMini8Bit | kSelectorHuge8Bit
kSelectorAll32BitData = kSelectorLarge32Bit | kSelectorSmall32Bit | kSelectorHuge32Bit
# kSelectorAllAvailableData = (long)0xFFFFFFFF
svLarge1Bit = kSelectorLarge1Bit
svLarge4Bit = kSelectorLarge4Bit
svLarge8Bit = kSelectorLarge8Bit
svSmall1Bit = kSelectorSmall1Bit
svSmall4Bit = kSelectorSmall4Bit
svSmall8Bit = kSelectorSmall8Bit
svMini1Bit = kSelectorMini1Bit
svMini4Bit = kSelectorMini4Bit
svMini8Bit = kSelectorMini8Bit
svAllLargeData = kSelectorAllLargeData
svAllSmallData = kSelectorAllSmallData
svAllMiniData = kSelectorAllMiniData
svAll1BitData = kSelectorAll1BitData
svAll4BitData = kSelectorAll4BitData
svAll8BitData = kSelectorAll8BitData
# svAllAvailableData = kSelectorAllAvailableData
kSystemIconsCreator = FOUR_CHAR_CODE('macs')
# err = GetIconRef(kOnSystemDisk
kClipboardIcon = FOUR_CHAR_CODE('CLIP')
kClippingUnknownTypeIcon = FOUR_CHAR_CODE('clpu')
kClippingPictureTypeIcon = FOUR_CHAR_CODE('clpp')
kClippingTextTypeIcon = FOUR_CHAR_CODE('clpt')
kClippingSoundTypeIcon = FOUR_CHAR_CODE('clps')
kDesktopIcon = FOUR_CHAR_CODE('desk')
kFinderIcon = FOUR_CHAR_CODE('FNDR')
kFontSuitcaseIcon = FOUR_CHAR_CODE('FFIL')
kFullTrashIcon = FOUR_CHAR_CODE('ftrh')
kGenericApplicationIcon = FOUR_CHAR_CODE('APPL')
kGenericCDROMIcon = FOUR_CHAR_CODE('cddr')
kGenericControlPanelIcon = FOUR_CHAR_CODE('APPC')
kGenericControlStripModuleIcon = FOUR_CHAR_CODE('sdev')
kGenericComponentIcon = FOUR_CHAR_CODE('thng')
kGenericDeskAccessoryIcon = FOUR_CHAR_CODE('APPD')
kGenericDocumentIcon = FOUR_CHAR_CODE('docu')
kGenericEditionFileIcon = FOUR_CHAR_CODE('edtf')
kGenericExtensionIcon = FOUR_CHAR_CODE('INIT')
kGenericFileServerIcon = FOUR_CHAR_CODE('srvr')
kGenericFontIcon = FOUR_CHAR_CODE('ffil')
kGenericFontScalerIcon = FOUR_CHAR_CODE('sclr')
kGenericFloppyIcon = FOUR_CHAR_CODE('flpy')
kGenericHardDiskIcon = FOUR_CHAR_CODE('hdsk')
kGenericIDiskIcon = FOUR_CHAR_CODE('idsk')
kGenericRemovableMediaIcon = FOUR_CHAR_CODE('rmov')
kGenericMoverObjectIcon = FOUR_CHAR_CODE('movr')
kGenericPCCardIcon = FOUR_CHAR_CODE('pcmc')
kGenericPreferencesIcon = FOUR_CHAR_CODE('pref')
kGenericQueryDocumentIcon = FOUR_CHAR_CODE('qery')
kGenericRAMDiskIcon = FOUR_CHAR_CODE('ramd')
kGenericSharedLibaryIcon = FOUR_CHAR_CODE('shlb')
kGenericStationeryIcon = FOUR_CHAR_CODE('sdoc')
kGenericSuitcaseIcon = FOUR_CHAR_CODE('suit')
kGenericURLIcon = FOUR_CHAR_CODE('gurl')
kGenericWORMIcon = FOUR_CHAR_CODE('worm')
kInternationalResourcesIcon = FOUR_CHAR_CODE('ifil')
kKeyboardLayoutIcon = FOUR_CHAR_CODE('kfil')
kSoundFileIcon = FOUR_CHAR_CODE('sfil')
kSystemSuitcaseIcon = FOUR_CHAR_CODE('zsys')
kTrashIcon = FOUR_CHAR_CODE('trsh')
kTrueTypeFontIcon = FOUR_CHAR_CODE('tfil')
kTrueTypeFlatFontIcon = FOUR_CHAR_CODE('sfnt')
kTrueTypeMultiFlatFontIcon = FOUR_CHAR_CODE('ttcf')
kUserIDiskIcon = FOUR_CHAR_CODE('udsk')
kInternationResourcesIcon = kInternationalResourcesIcon
kInternetLocationHTTPIcon = FOUR_CHAR_CODE('ilht')
kInternetLocationFTPIcon = FOUR_CHAR_CODE('ilft')
kInternetLocationAppleShareIcon = FOUR_CHAR_CODE('ilaf')
kInternetLocationAppleTalkZoneIcon = FOUR_CHAR_CODE('ilat')
kInternetLocationFileIcon = FOUR_CHAR_CODE('ilfi')
kInternetLocationMailIcon = FOUR_CHAR_CODE('ilma')
kInternetLocationNewsIcon = FOUR_CHAR_CODE('ilnw')
kInternetLocationNSLNeighborhoodIcon = FOUR_CHAR_CODE('ilns')
kInternetLocationGenericIcon = FOUR_CHAR_CODE('ilge')
kGenericFolderIcon = FOUR_CHAR_CODE('fldr')
kDropFolderIcon = FOUR_CHAR_CODE('dbox')
kMountedFolderIcon = FOUR_CHAR_CODE('mntd')
kOpenFolderIcon = FOUR_CHAR_CODE('ofld')
kOwnedFolderIcon = FOUR_CHAR_CODE('ownd')
kPrivateFolderIcon = FOUR_CHAR_CODE('prvf')
kSharedFolderIcon = FOUR_CHAR_CODE('shfl')
kSharingPrivsNotApplicableIcon = FOUR_CHAR_CODE('shna')
kSharingPrivsReadOnlyIcon = FOUR_CHAR_CODE('shro')
kSharingPrivsReadWriteIcon = FOUR_CHAR_CODE('shrw')
kSharingPrivsUnknownIcon = FOUR_CHAR_CODE('shuk')
kSharingPrivsWritableIcon = FOUR_CHAR_CODE('writ')
kUserFolderIcon = FOUR_CHAR_CODE('ufld')
kWorkgroupFolderIcon = FOUR_CHAR_CODE('wfld')
kGuestUserIcon = FOUR_CHAR_CODE('gusr')
kUserIcon = FOUR_CHAR_CODE('user')
kOwnerIcon = FOUR_CHAR_CODE('susr')
kGroupIcon = FOUR_CHAR_CODE('grup')
kAppearanceFolderIcon = FOUR_CHAR_CODE('appr')
kAppleExtrasFolderIcon = FOUR_CHAR_CODE('aex\xc4')
kAppleMenuFolderIcon = FOUR_CHAR_CODE('amnu')
kApplicationsFolderIcon = FOUR_CHAR_CODE('apps')
kApplicationSupportFolderIcon = FOUR_CHAR_CODE('asup')
kAssistantsFolderIcon = FOUR_CHAR_CODE('ast\xc4')
kColorSyncFolderIcon = FOUR_CHAR_CODE('prof')
kContextualMenuItemsFolderIcon = FOUR_CHAR_CODE('cmnu')
kControlPanelDisabledFolderIcon = FOUR_CHAR_CODE('ctrD')
kControlPanelFolderIcon = FOUR_CHAR_CODE('ctrl')
kControlStripModulesFolderIcon = FOUR_CHAR_CODE('sdv\xc4')
kDocumentsFolderIcon = FOUR_CHAR_CODE('docs')
kExtensionsDisabledFolderIcon = FOUR_CHAR_CODE('extD')
kExtensionsFolderIcon = FOUR_CHAR_CODE('extn')
kFavoritesFolderIcon = FOUR_CHAR_CODE('favs')
kFontsFolderIcon = FOUR_CHAR_CODE('font')
kHelpFolderIcon = FOUR_CHAR_CODE('\xc4hlp')
kInternetFolderIcon = FOUR_CHAR_CODE('int\xc4')
kInternetPlugInFolderIcon = FOUR_CHAR_CODE('\xc4net')
kInternetSearchSitesFolderIcon = FOUR_CHAR_CODE('issf')
kLocalesFolderIcon = FOUR_CHAR_CODE('\xc4loc')
kMacOSReadMeFolderIcon = FOUR_CHAR_CODE('mor\xc4')
kPublicFolderIcon = FOUR_CHAR_CODE('pubf')
kPreferencesFolderIcon = FOUR_CHAR_CODE('prf\xc4')
kPrinterDescriptionFolderIcon = FOUR_CHAR_CODE('ppdf')
kPrinterDriverFolderIcon = FOUR_CHAR_CODE('\xc4prd')
kPrintMonitorFolderIcon = FOUR_CHAR_CODE('prnt')
kRecentApplicationsFolderIcon = FOUR_CHAR_CODE('rapp')
kRecentDocumentsFolderIcon = FOUR_CHAR_CODE('rdoc')
kRecentServersFolderIcon = FOUR_CHAR_CODE('rsrv')
kScriptingAdditionsFolderIcon = FOUR_CHAR_CODE('\xc4scr')
kSharedLibrariesFolderIcon = FOUR_CHAR_CODE('\xc4lib')
kScriptsFolderIcon = FOUR_CHAR_CODE('scr\xc4')
kShutdownItemsDisabledFolderIcon = FOUR_CHAR_CODE('shdD')
kShutdownItemsFolderIcon = FOUR_CHAR_CODE('shdf')
kSpeakableItemsFolder = FOUR_CHAR_CODE('spki')
kStartupItemsDisabledFolderIcon = FOUR_CHAR_CODE('strD')
kStartupItemsFolderIcon = FOUR_CHAR_CODE('strt')
kSystemExtensionDisabledFolderIcon = FOUR_CHAR_CODE('macD')
kSystemFolderIcon = FOUR_CHAR_CODE('macs')
kTextEncodingsFolderIcon = FOUR_CHAR_CODE('\xc4tex')
kUsersFolderIcon = FOUR_CHAR_CODE('usr\xc4')
kUtilitiesFolderIcon = FOUR_CHAR_CODE('uti\xc4')
kVoicesFolderIcon = FOUR_CHAR_CODE('fvoc')
kSystemFolderXIcon = FOUR_CHAR_CODE('macx')
kAppleScriptBadgeIcon = FOUR_CHAR_CODE('scrp')
kLockedBadgeIcon = FOUR_CHAR_CODE('lbdg')
kMountedBadgeIcon = FOUR_CHAR_CODE('mbdg')
kSharedBadgeIcon = FOUR_CHAR_CODE('sbdg')
kAliasBadgeIcon = FOUR_CHAR_CODE('abdg')
kAlertCautionBadgeIcon = FOUR_CHAR_CODE('cbdg')
kAlertNoteIcon = FOUR_CHAR_CODE('note')
kAlertCautionIcon = FOUR_CHAR_CODE('caut')
kAlertStopIcon = FOUR_CHAR_CODE('stop')
kAppleTalkIcon = FOUR_CHAR_CODE('atlk')
kAppleTalkZoneIcon = FOUR_CHAR_CODE('atzn')
kAFPServerIcon = FOUR_CHAR_CODE('afps')
kFTPServerIcon = FOUR_CHAR_CODE('ftps')
kHTTPServerIcon = FOUR_CHAR_CODE('htps')
kGenericNetworkIcon = FOUR_CHAR_CODE('gnet')
kIPFileServerIcon = FOUR_CHAR_CODE('isrv')
kToolbarCustomizeIcon = FOUR_CHAR_CODE('tcus')
kToolbarDeleteIcon = FOUR_CHAR_CODE('tdel')
kToolbarFavoritesIcon = FOUR_CHAR_CODE('tfav')
kToolbarHomeIcon = FOUR_CHAR_CODE('thom')
kAppleLogoIcon = FOUR_CHAR_CODE('capl')
kAppleMenuIcon = FOUR_CHAR_CODE('sapl')
kBackwardArrowIcon = FOUR_CHAR_CODE('baro')
kFavoriteItemsIcon = FOUR_CHAR_CODE('favr')
kForwardArrowIcon = FOUR_CHAR_CODE('faro')
kGridIcon = FOUR_CHAR_CODE('grid')
kHelpIcon = FOUR_CHAR_CODE('help')
kKeepArrangedIcon = FOUR_CHAR_CODE('arng')
kLockedIcon = FOUR_CHAR_CODE('lock')
kNoFilesIcon = FOUR_CHAR_CODE('nfil')
kNoFolderIcon = FOUR_CHAR_CODE('nfld')
kNoWriteIcon = FOUR_CHAR_CODE('nwrt')
kProtectedApplicationFolderIcon = FOUR_CHAR_CODE('papp')
kProtectedSystemFolderIcon = FOUR_CHAR_CODE('psys')
kRecentItemsIcon = FOUR_CHAR_CODE('rcnt')
kShortcutIcon = FOUR_CHAR_CODE('shrt')
kSortAscendingIcon = FOUR_CHAR_CODE('asnd')
kSortDescendingIcon = FOUR_CHAR_CODE('dsnd')
kUnlockedIcon = FOUR_CHAR_CODE('ulck')
kConnectToIcon = FOUR_CHAR_CODE('cnct')
kGenericWindowIcon = FOUR_CHAR_CODE('gwin')
kQuestionMarkIcon = FOUR_CHAR_CODE('ques')
kDeleteAliasIcon = FOUR_CHAR_CODE('dali')
kEjectMediaIcon = FOUR_CHAR_CODE('ejec')
kBurningIcon = FOUR_CHAR_CODE('burn')
kRightContainerArrowIcon = FOUR_CHAR_CODE('rcar')
kIconServicesNormalUsageFlag = 0
kIconServicesCatalogInfoMask = (kFSCatInfoNodeID | kFSCatInfoParentDirID | kFSCatInfoVolume | kFSCatInfoNodeFlags | kFSCatInfoFinderInfo | kFSCatInfoFinderXInfo | kFSCatInfoUserAccess)
# Flags for the PlotIconRef* calls.
# BUG FIX: was "0L" — the long-literal suffix is Python-2-only syntax and a
# SyntaxError on Python 3; plain 0 is equivalent on Python 2 as well.
kPlotIconRefNormalFlags = 0
kPlotIconRefNoImage = (1 << 1)  # suppress the icon image
kPlotIconRefNoMask = (1 << 2)   # suppress the icon mask
kIconFamilyType = FOUR_CHAR_CODE('icns')
|
timgraham/django-registration | refs/heads/master | registration/backends/default/urls.py | 5 | """
URLconf for registration and activation, using django-registration's
default backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.default.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize registration behavior, feel free to set up
your own URL patterns for these views instead.
"""
from django.conf.urls import include
from django.conf.urls import url
from django.conf import settings
from django.views.generic.base import TemplateView
from .views import ActivationView
from .views import RegistrationView
# URL routes for django-registration's default backend.
urlpatterns = [
    # Static "your account is now active" page.
    url(r'^activate/complete/$',
        TemplateView.as_view(template_name='registration/activation_complete.html'),
        name='registration_activation_complete'),
    # Activation keys get matched by \w+ instead of the more specific
    # [a-fA-F0-9]{40} because a bad activation key should still get to the view;
    # that way it can return a sensible "invalid key" message instead of a
    # confusing 404.
    url(r'^activate/(?P<activation_key>\w+)/$',
        ActivationView.as_view(),
        name='registration_activate'),
    # Static "check your email" page shown after submitting the form.
    url(r'^register/complete/$',
        TemplateView.as_view(template_name='registration/registration_complete.html'),
        name='registration_complete'),
    # Shown when RegistrationView decides registration is not allowed.
    url(r'^register/closed/$',
        TemplateView.as_view(template_name='registration/registration_closed.html'),
        name='registration_disallowed'),
]

# Both toggles default to True; projects can set them to False in settings
# to supply their own registration/auth URL patterns instead.
if getattr(settings, 'INCLUDE_REGISTER_URL', True):
    urlpatterns += [
        url(r'^register/$',
            RegistrationView.as_view(),
            name='registration_register'),
    ]

if getattr(settings, 'INCLUDE_AUTH_URLS', True):
    urlpatterns += [
        url(r'', include('registration.auth_urls')),
    ]
|
YanTangZhai/tf | refs/heads/master | tensorflow/python/client/events_writer_test.py | 25 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SWIG-wrapped events writer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.lib.io import tf_record
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
class PywrapeventsWriterTest(test_util.TensorFlowTestCase):
  """Tests for the SWIG-wrapped EventsWriter exposed via pywrap_tensorflow."""

  def testWriteEvents(self):
    # Round-trip test: write one summary event to a fresh events file,
    # then read the file back with tf_record and verify both records
    # (the file_version header plus the event itself).
    file_prefix = os.path.join(self.get_temp_dir(), "events")
    writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(file_prefix))
    filename = compat.as_text(writer.FileName())
    event_written = event_pb2.Event(
        wall_time=123.45, step=67,
        summary=summary_pb2.Summary(
            value=[summary_pb2.Summary.Value(tag="foo", simple_value=89.0)]))
    writer.WriteEvent(event_written)
    writer.Flush()
    writer.Close()
    # Iterating a non-existent file must raise rather than yield nothing.
    with self.assertRaises(IOError):
      for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"):
        self.assertTrue(False)
    reader = tf_record.tf_record_iterator(filename)
    event_read = event_pb2.Event()
    # First record: the file_version header EventsWriter emits on open.
    event_read.ParseFromString(next(reader))
    self.assertTrue(event_read.HasField("file_version"))
    event_read.ParseFromString(next(reader))
    # Second event
    self.assertProtoEquals("""
    wall_time: 123.45 step: 67
    summary { value { tag: 'foo' simple_value: 89.0 } }
    """, event_read)
    # No further records should exist.
    with self.assertRaises(StopIteration):
      next(reader)

  def testWriteEventInvalidType(self):
    # WriteEvent rejects non-Event objects with a TypeError whose message
    # includes str() of the offending object.
    class _Invalid(object):
      def __str__(self): return "Invalid"
    with self.assertRaisesRegexp(TypeError, "Invalid"):
      pywrap_tensorflow.EventsWriter(b"foo").WriteEvent(_Invalid())


if __name__ == "__main__":
  googletest.main()
|
cpcloud/odo | refs/heads/master | odo/backends/tests/test_bokeh.py | 7 | from __future__ import absolute_import, division, print_function
import pytest
bokeh = pytest.importorskip('bokeh')
from odo.backends.bokeh import convert, pd, ColumnDataSource
import pandas.util.testing as tm
# Shared fixture: a small two-column frame used by the conversion tests.
df = pd.DataFrame([[100, 'Alice'],
                   [200, 'Bob'],
                   [300, 'Charlie']],
                  columns=['balance', 'name'])
def test_convert_dataframe_to_cds():
    """Round-trip df -> ColumnDataSource -> DataFrame through odo's convert."""
    source = convert(ColumnDataSource, df)
    expected = {'name': ['Alice', 'Bob', 'Charlie'],
                'balance': [100, 200, 300]}
    assert source.data == expected

    round_tripped = convert(pd.DataFrame, source)
    assert isinstance(round_tripped, pd.DataFrame)
    tm.assert_frame_equal(df, round_tripped)
|
nimadini/Teammate | refs/heads/master | init.py | 1 | __author__ = 'stanley'
import jinja2
import os
# Name of the search index used for basic user lookups.
INDEX_NAME = 'user_basic'
# Module-level Jinja2 environment: templates load relative to this file's
# directory, with autoescaping enabled.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True) |
wongkwunkit/jinja2 | refs/heads/master | jinja2/exceptions.py | 977 | # -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
    """Baseclass for all template errors."""

    if PY2:
        def __init__(self, message=None):
            # Store the message UTF-8 encoded so str(exc) works on
            # Python 2 even for non-ASCII messages.
            if message is not None:
                message = text_type(message).encode('utf-8')
            Exception.__init__(self, message)

        @property
        def message(self):
            # Decode back to unicode for display; undecodable bytes are
            # replaced instead of raising.
            if self.args:
                message = self.args[0]
                if message is not None:
                    return message.decode('utf-8', 'replace')

        def __unicode__(self):
            return self.message or u''
    else:
        def __init__(self, message=None):
            Exception.__init__(self, message)

        @property
        def message(self):
            # On Python 3 the message is stored as-is (returns None when
            # no message was given).
            if self.args:
                message = self.args[0]
                if message is not None:
                    return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
    """Raised if a template does not exist."""

    # looks weird, but removes the warning descriptor that just
    # bogusly warns us about message being deprecated
    message = None

    def __init__(self, name, message=None):
        IOError.__init__(self)
        if message is None:
            message = name
        self.message = message
        self.name = name
        # kept as a list so TemplatesNotFound can share the same shape
        self.templates = [name]

    def __str__(self):
        return self.message
class TemplatesNotFound(TemplateNotFound):
    """Like :class:`TemplateNotFound` but raised if multiple templates
    are selected.  This is a subclass of :class:`TemplateNotFound`
    exception, so just catching the base exception will catch both.

    .. versionadded:: 2.2
    """

    def __init__(self, names=(), message=None):
        if message is None:
            message = u'none of the templates given were found: ' + \
                      u', '.join(imap(text_type, names))
        # `name` becomes the last candidate tried (or None when the list
        # is empty); `templates` keeps the full list.
        TemplateNotFound.__init__(self, names and names[-1] or None, message)
        self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
    """Raised to tell the user that there is a problem with the template."""

    def __init__(self, message, lineno, name=None, filename=None):
        TemplateError.__init__(self, message)
        self.lineno = lineno
        self.name = name
        self.filename = filename
        self.source = None
        # Flipped to True once debug.translate_syntax_error has rewritten
        # this error into a new traceback.
        self.translated = False

    def __str__(self):
        """Render the message plus location and source-line context."""
        # Translated errors carry their context in the traceback already,
        # so only the bare message is returned for them.
        if self.translated:
            return self.message

        location = 'line %d' % self.lineno
        display_name = self.filename or self.name
        if display_name:
            location = 'File "%s", %s' % (display_name, location)
        parts = [self.message, '  ' + location]

        # When the template source is available, append the offending line.
        if self.source is not None:
            try:
                offending = self.source.splitlines()[self.lineno - 1]
            except IndexError:
                offending = None
            if offending:
                parts.append('    ' + offending.strip())

        return u'\n'.join(parts)
class TemplateAssertionError(TemplateSyntaxError):
    """Like a template syntax error, but covers cases where something in the
    template caused an error at compile time that wasn't necessarily caused
    by a syntax error.  However it's a direct subclass of
    :exc:`TemplateSyntaxError` and has the same attributes.
    """


class TemplateRuntimeError(TemplateError):
    """A generic runtime error in the template engine.  Under some situations
    Jinja may raise this exception.
    """


class UndefinedError(TemplateRuntimeError):
    """Raised if a template tries to operate on :class:`Undefined`."""


class SecurityError(TemplateRuntimeError):
    """Raised if a template tries to do something insecure if the
    sandbox is enabled.
    """


class FilterArgumentError(TemplateRuntimeError):
    """This error is raised if a filter was called with inappropriate
    arguments.
    """
|
asedunov/intellij-community | refs/heads/master | python/helpers/pydev/_pydev_imps/_pydev_inspect.py | 92 | """Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), istraceback(),
isframe(), iscode(), isbuiltin(), isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues() - get info about function arguments
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys, os, types, string, re, imp, tokenize
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Check whether *object* is a module.

    Modules expose ``__doc__`` (documentation string) and, except for
    built-in modules, ``__file__`` (the filename they were loaded from)."""
    module_type = types.ModuleType
    return isinstance(object, module_type)
def isclass(object):
    """Return true if the object is a class.

    Class objects provide these attributes:
        __doc__         documentation string
        __module__      name of module in which this class was defined"""
    # types.ClassType matches Python 2 old-style classes; the __bases__
    # check also catches new-style classes.
    return isinstance(object, types.ClassType) or hasattr(object, '__bases__')
def ismethod(object):
    """Check whether *object* is an instance method.

    Instance methods expose __doc__, __name__, im_class (the class the
    method belongs to), im_func (the underlying function object) and
    im_self (the bound instance, or None when unbound)."""
    method_type = types.MethodType
    return isinstance(object, method_type)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.

    This is new in Python 2.2, and, for example, is true of int.__add__.
    An object passing this test has a __get__ attribute but not a __set__
    attribute, but beyond that the set of attributes varies.  __name__ is
    usually sensible, and __doc__ often is.

    Methods implemented via descriptors that also pass one of the other
    tests return false from the ismethoddescriptor() test, simply because
    the other tests promise more -- you can, e.g., count on having the
    im_func attribute (etc) when an object passes ismethod()."""
    return (hasattr(object, "__get__")
            and not hasattr(object, "__set__") # else it's a data descriptor
            and not ismethod(object)           # mutual exclusion
            and not isfunction(object)
            and not isclass(object))
def isfunction(object):
    """Check whether *object* is a user-defined function (def or lambda).

    Function objects expose __doc__/func_doc, __name__/func_name,
    func_code (compiled bytecode), func_defaults (default argument
    values) and func_globals (the defining global namespace)."""
    function_type = types.FunctionType
    return isinstance(object, function_type)
def istraceback(object):
    """Check whether *object* is a traceback.

    Tracebacks expose tb_frame (the frame object at this level),
    tb_lasti (index of the last attempted bytecode instruction),
    tb_lineno (current source line number) and tb_next (the next inner
    traceback called by this level)."""
    traceback_type = types.TracebackType
    return isinstance(object, traceback_type)
def isframe(object):
    """Check whether *object* is a frame object.

    Frames expose f_back (the calling frame), f_builtins, f_code (the
    code object being executed), f_exc_traceback/f_exc_type/f_exc_value
    (exception state raised in this frame, or None), f_globals,
    f_lasti (index of the last attempted bytecode instruction),
    f_lineno (current source line), f_locals, f_restricted (0/1 for
    restricted execution mode) and f_trace (tracing function or None)."""
    frame_type = types.FrameType
    return isinstance(object, frame_type)
def iscode(object):
    """Check whether *object* is a code object.

    Code objects expose co_argcount (number of arguments excluding
    * / ** args), co_code (raw bytecode string), co_consts, co_filename,
    co_firstlineno, co_flags (1=optimized | 2=newlocals | 4=*arg |
    8=**arg), co_lnotab (line-number table), co_name, co_names,
    co_nlocals, co_stacksize and co_varnames."""
    code_type = types.CodeType
    return isinstance(object, code_type)
def isbuiltin(object):
    """Check whether *object* is a built-in function or method.

    Built-ins expose __doc__, __name__ (the original name) and
    __self__ (the bound instance for methods, or None)."""
    builtin_type = types.BuiltinFunctionType
    return isinstance(object, builtin_type)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Covers built-ins, plain functions, bound/unbound methods and
    # method descriptors.
    return (isbuiltin(object)
            or isfunction(object)
            or ismethod(object)
            or ismethoddescriptor(object))
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    members = []
    for name in dir(object):
        attr = getattr(object, name)
        # Keep the member unless a predicate was given and rejects it.
        if predicate and not predicate(attr):
            continue
        members.append((name, attr))
    members.sort()
    return members
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:
        0. The name (a string).
        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method
        2. The class which defined this attribute (a class).
        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        if name in cls.__dict__:
            obj = cls.__dict__[name]
        else:
            obj = getattr(cls, name)
        # Figure out where it was defined: __objclass__ when the
        # descriptor says so, otherwise the first MRO class holding it.
        homecls = getattr(obj, "__objclass__", None)
        if homecls is None:
            # search the dicts.
            for base in mro:
                if name in base.__dict__:
                    homecls = base
                    break
        # Get the object again, in order to get it from the defining
        # __dict__ instead of via getattr (if possible).
        if homecls is not None and name in homecls.__dict__:
            obj = homecls.__dict__[name]
        # Also get the object via getattr.
        obj_via_getattr = getattr(cls, name)
        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif (ismethod(obj_via_getattr) or
              ismethoddescriptor(obj_via_getattr)):
            kind = "method"
        else:
            kind = "data"
        result.append((name, kind, homecls, obj))
    return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
    # Simulate the "classic class" search order: depth-first,
    # left-to-right, recording each class only once.
    if cls not in accum:
        accum.append(cls)
        for parent in cls.__bases__:
            _searchbases(parent, accum)
def getmro(cls):
    "Return tuple of base classes (including cls) in method resolution order."
    if hasattr(cls, "__mro__"):
        # New-style classes carry their MRO directly.
        return cls.__mro__
    else:
        # Old-style classes: emulate the classic depth-first search.
        result = []
        _searchbases(cls, result)
        return tuple(result)
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    # NOTE: uses Python 2 `string` module functions throughout this file.
    expline = string.expandtabs(line)
    return len(expline) - len(string.lstrip(expline))

def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace than can be
    uniformly removed from the second line onwards is removed."""
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    # `unicode` exists on Python 2 only.
    if not isinstance(doc, (str, unicode)):
        return None
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    else:
        # Find the smallest indentation of the non-blank lines after the
        # first, then strip that margin from every following line.
        margin = None
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if not content: continue
            indent = len(line) - content
            if margin is None: margin = indent
            else: margin = min(margin, indent)
        if margin is not None:
            for i in range(1, len(lines)): lines[i] = lines[i][margin:]
        return string.join(lines, '\n')
def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    # Walks the object graph down to a code object, whose co_filename is
    # the answer.  NOTE: Python 2 `raise Type, msg` syntax throughout.
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError, 'arg is a built-in module'
    if isclass(object):
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError, 'arg is a built-in class'
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError, 'arg is not a module, class, method, ' \
                     'function, traceback, frame, or code object'

def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file."""
    filename = os.path.basename(path)
    # NOTE: Python 2 tuple-unpacking lambda parameters.
    suffixes = map(lambda (suffix, mode, mtype):
                   (-len(suffix), suffix, mode, mtype), imp.get_suffixes())
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return filename[:neglen], suffix, mode, mtype

def getmodulename(path):
    """Return the module name for a given file, or None."""
    info = getmoduleinfo(path)
    if info: return info[0]

def getsourcefile(object):
    """Return the Python source file an object was defined in, if it exists."""
    filename = getfile(object)
    # Map byte-compiled .pyc/.pyo files back to their .py source.
    if string.lower(filename[-4:]) in ['.pyc', '.pyo']:
        filename = filename[:-4] + '.py'
    for suffix, mode, kind in imp.get_suffixes():
        if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
            # Looks like a binary file.  We want to only return a text file.
            return None
    if os.path.exists(filename):
        return filename

def getabsfile(object):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    return os.path.normcase(
        os.path.abspath(getsourcefile(object) or getfile(object)))

# Cache mapping absolute file names to the names of modules loaded from them.
modulesbyfile = {}

def getmodule(object):
    """Return the module an object was defined in, or None if not found."""
    if ismodule(object):
        return object
    if isclass(object):
        return sys.modules.get(object.__module__)
    try:
        file = getabsfile(object)
    except TypeError:
        return None
    if modulesbyfile.has_key(file):
        return sys.modules[modulesbyfile[file]]
    # Cache miss: rebuild the file -> module-name cache and retry.
    for module in sys.modules.values():
        if hasattr(module, '__file__'):
            modulesbyfile[getabsfile(module)] = module.__name__
    if modulesbyfile.has_key(file):
        return sys.modules[modulesbyfile[file]]
    # Last resort: look the object up by name in __main__/__builtin__.
    main = sys.modules['__main__']
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    builtin = sys.modules['__builtin__']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    try:
        file = open(getsourcefile(object))
    except (TypeError, IOError):
        raise IOError, 'could not get source code'
    lines = file.readlines()
    file.close()
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^\s*class\s*' + name + r'\b')
        # for..else: raise only when no line matched the class pattern.
        for i in range(len(lines)):
            if pat.match(lines[i]): return lines, i
        else: raise IOError, 'could not find class definition'
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError, 'could not find function definition'
        # Scan upward from co_firstlineno for the def/lambda line itself.
        lnum = object.co_firstlineno - 1
        pat = re.compile(r'^(\s*def\s)|(.*\slambda(:|\s))')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError, 'could not find code object'

def getcomments(object):
    """Get lines of comments immediately preceding an object's source code."""
    try: lines, lnum = findsource(object)
    except IOError: return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and string.strip(lines[start]) in ['', '#']:
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(string.expandtabs(lines[end]))
                end = end + 1
            return string.join(comments, '')
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [string.lstrip(string.expandtabs(lines[end]))]
            if end > 0:
                end = end - 1
                comment = string.lstrip(string.expandtabs(lines[end]))
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = string.lstrip(string.expandtabs(lines[end]))
            # Strip bare '#' marker lines from both ends of the block.
            while comments and string.strip(comments[0]) == '#':
                comments[:1] = []
            while comments and string.strip(comments[-1]) == '#':
                comments[-1:] = []
            return string.join(comments, '')
class ListReader:
    """Provide a readline() method to return lines from a list of strings."""

    def __init__(self, lines):
        self.lines = lines
        self.index = 0

    def readline(self):
        # Past the end of the list, keep returning '' like file.readline().
        if self.index >= len(self.lines):
            return ''
        line = self.lines[self.index]
        self.index = self.index + 1
        return line
# Control-flow exception: carries the last line of the block as args[0].
class EndOfBlock(Exception): pass

class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0
        self.started = 0
        self.last = 0

    # NOTE: Python 2 tuple-unpacking parameters in the signature.
    def tokeneater(self, type, token, (srow, scol), (erow, ecol), line):
        if not self.started:
            # Wait for the first NAME token (the def/class keyword).
            if type == tokenize.NAME: self.started = 1
        elif type == tokenize.NEWLINE:
            self.last = srow
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # Back at the starting indentation: the block is over.
            if self.indent == 0: raise EndOfBlock, self.last
        elif type == tokenize.NAME and scol == 0:
            # A new top-level name also terminates the block.
            raise EndOfBlock, self.last

def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    try:
        tokenize.tokenize(ListReader(lines).readline, BlockFinder().tokeneater)
    except EndOfBlock, eob:
        return lines[:eob.args[0]]
    # Fooling the indent/dedent logic implies a one-line definition
    return lines[:1]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of the lines
    corresponding to the object and the line number indicates where in the
    original source file the first line of code was found.  An IOError is
    raised if the source code cannot be retrieved."""
    lines, lnum = findsource(object)
    # Modules get the whole file; everything else is trimmed to its block.
    if ismodule(object): return lines, 0
    else: return getblock(lines[lnum:]), lnum + 1

def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a single string.  An
    IOError is raised if the source code cannot be retrieved."""
    lines, lnum = getsourcelines(object)
    return string.join(lines, '')
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree()."""
    results = []
    # Python 2 cmp-style sort by class name.
    classes.sort(lambda a, b: cmp(a.__name__, b.__name__))
    for c in classes:
        results.append((c, c.__bases__))
        if children.has_key(c):
            results.append(walktree(children[c], children, c))
    return results

def getclasstree(classes, unique=0):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for c in classes:
        if c.__bases__:
            for parent in c.__bases__:
                if not children.has_key(parent):
                    children[parent] = []
                children[parent].append(c)
                if unique and parent in classes: break
        elif c not in roots:
            roots.append(c)
    # Bases that are not themselves in `classes` become extra roots.
    for parent in children.keys():
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
# Code-object flag bits (from Python's compile.h).
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8

def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where 'args' is
    a list of argument names (possibly containing nested lists), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None."""
    if not iscode(co): raise TypeError, 'arg is not a code object'
    nargs = co.co_argcount
    names = co.co_varnames
    args = list(names[:nargs])
    step = 0
    # The following acrobatics are for anonymous (tuple) arguments:
    # Python 2 names them '' or '.N' and unpacks them in the bytecode
    # prologue, so we decode UNPACK/STORE_FAST ops to recover the names.
    if not sys.platform.startswith('java'):#Jython doesn't have co_code
        code = co.co_code
        import dis
        for i in range(nargs):
            if args[i][:1] in ['', '.']:
                stack, remain, count = [], [], []
                while step < len(code):
                    op = ord(code[step])
                    step = step + 1
                    if op >= dis.HAVE_ARGUMENT:
                        opname = dis.opname[op]
                        value = ord(code[step]) + ord(code[step + 1]) * 256
                        step = step + 2
                        if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']:
                            remain.append(value)
                            count.append(value)
                        elif opname == 'STORE_FAST':
                            stack.append(names[value])
                            remain[-1] = remain[-1] - 1
                            while remain[-1] == 0:
                                remain.pop()
                                size = count.pop()
                                stack[-size:] = [stack[-size:]]
                                if not remain: break
                                remain[-1] = remain[-1] - 1
                            if not remain: break
                args[i] = stack[0]
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, varkw

def getargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments."""
    if ismethod(func):
        func = func.im_func
    if not isfunction(func): raise TypeError, 'arg is not a Python function'
    args, varargs, varkw = getargs(func.func_code)
    return args, varargs, varkw, func.func_defaults

def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    args, varargs, varkw = getargs(frame.f_code)
    return args, varargs, varkw, frame.f_locals
def joinseq(seq):
    # Render a sequence as a tuple display, keeping the trailing comma
    # for one-element tuples.
    if len(seq) == 1:
        return '(' + seq[0] + ',)'
    else:
        return '(' + string.join(seq, ', ') + ')'

def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element."""
    # types.ListType/TupleType are Python 2 aliases for list/tuple.
    if type(object) in [types.ListType, types.TupleType]:
        return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
    else:
        return convert(object)

def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  join=joinseq):
    """Format an argument spec from the 4 values returned by getargspec.

    The first four arguments are (args, varargs, varkw, defaults).  The
    other four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings.  The ninth
    argument is an optional function to format the sequence of arguments."""
    specs = []
    if defaults:
        # Defaults align with the last len(defaults) positional args.
        firstdefault = len(args) - len(defaults)
    for i in range(len(args)):
        spec = strseq(args[i], formatarg, join)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs:
        specs.append(formatvarargs(varargs))
    if varkw:
        specs.append(formatvarkw(varkw))
    return '(' + string.join(specs, ', ') + ')'

def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings.  The ninth
    argument is an optional function to format the sequence of arguments."""
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        # Render "name=value" using the frame's locals.
        return formatarg(name) + formatvalue(locals[name])
    specs = []
    for i in range(len(args)):
        specs.append(strseq(args[i], convert, join))
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + string.join(specs, ', ') + ')'
# -------------------------------------------------- stack frame extraction
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line.

    Not ported in this copy of the module; the reference implementation is
    kept below as a comment.
    """
    raise NotImplementedError
    # Reference implementation (unported):
    # if istraceback(frame):
    #     frame = frame.tb_frame
    # if not isframe(frame):
    #     raise TypeError, 'arg is not a frame or traceback object'
    #
    # filename = getsourcefile(frame)
    # lineno = getlineno(frame)
    # if context > 0:
    #     start = lineno - 1 - context//2
    #     try:
    #         lines, lnum = findsource(frame)
    #     except IOError:
    #         lines = index = None
    #     else:
    #         start = max(start, 1)
    #         start = min(start, len(lines) - context)
    #         lines = lines[start:start+context]
    #         index = lineno - 1 - start
    # else:
    #     lines = index = None
    #
    # return (filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # Written by Marc-Andre Lemburg; revised by Jim Hugunin and Fredrik Lundh.
    lineno = frame.f_lineno
    code = frame.f_code
    if hasattr(code, 'co_lnotab'):
        # Walk the (addr_delta, line_delta) byte pairs of co_lnotab until
        # the accumulated address passes the frame's last instruction.
        table = code.co_lnotab
        lineno = code.co_firstlineno
        addr = 0
        i = 0
        while i < len(table):
            addr += ord(table[i])
            if addr > frame.f_lasti:
                break
            lineno += ord(table[i + 1])
            i += 2
    return lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context.
    """
    records = []
    current = frame
    while current:
        info = getframeinfo(current, context)
        records.append((current,) + info)
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context.
    """
    records = []
    current = tb
    while current:
        info = getframeinfo(current, context)
        records.append((current.tb_frame,) + info)
        current = current.tb_next
    return records
def currentframe():
    """Return the frame object for the caller's stack frame."""
    try:
        # Python-2 trick: raising (a string exception, py2-only) populates
        # sys.exc_traceback, whose frame's f_back is the caller's frame.
        raise 'catch me'
    except:
        return sys.exc_traceback.tb_frame.f_back #@UndefinedVariable
# Prefer the much faster C-level sys._getframe when the runtime provides it
# (CPython does); the function above is only a fallback.
if hasattr(sys, '_getframe'): currentframe = sys._getframe
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = currentframe().f_back
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    tb = sys.exc_traceback #@UndefinedVariable (Python-2-only attribute)
    return getinnerframes(tb, context)
|
cxz1418/yolo_resnet | refs/heads/master | yolo/net _resnet/yolo_net.py | 3 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import re
from yolo.net.net import Net
class YoloNet(Net):
  """YOLO v1 detection network: a convolutional backbone plus two fully
  connected layers producing, per grid cell, class probabilities, per-box
  confidences and box coordinates.

  NOTE(review): this file mixed the removed pre-1.0 name ``tf.pack`` with
  the 1.0 name ``tf.stack`` (used elsewhere in this class); ``iou`` below
  now uses ``tf.stack`` consistently.  Other pre-1.0 APIs
  (``tf.scalar_summary``, ``keep_dims``) are kept as-is and flagged.
  """

  def __init__(self, common_params, net_params, test=False):
    """
    common_params: a params dict (image_size, num_classes, batch_size)
    net_params   : a params dict (cell_size, boxes_per_cell, weight_decay
                   and, when training, the four loss scales)
    """
    super(YoloNet, self).__init__(common_params, net_params)
    # process params
    self.image_size = int(common_params['image_size'])
    self.num_classes = int(common_params['num_classes'])
    self.cell_size = int(net_params['cell_size'])
    self.boxes_per_cell = int(net_params['boxes_per_cell'])
    self.batch_size = int(common_params['batch_size'])
    self.weight_decay = float(net_params['weight_decay'])

    if not test:
      # Loss-term weights are only needed for training.
      self.object_scale = float(net_params['object_scale'])
      self.noobject_scale = float(net_params['noobject_scale'])
      self.class_scale = float(net_params['class_scale'])
      self.coord_scale = float(net_params['coord_scale'])

  def inference(self, images):
    """Build the yolo model.

    Args:
      images: 4-D tensor [batch_size, image_height, image_width, channels]
    Returns:
      predicts: 4-D tensor
        [batch_size, cell_size, cell_size, num_classes + 5 * boxes_per_cell]
    """
    conv_num = 1
    temp_conv = self.conv2d('conv' + str(conv_num), images, [7, 7, 3, 64], stride=2)
    conv_num += 1
    temp_pool = self.max_pool(temp_conv, [2, 2], 2)

    temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 64, 192], stride=1)
    conv_num += 1
    temp_pool = self.max_pool(temp_conv, [2, 2], 2)

    temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [1, 1, 192, 128], stride=1)
    conv_num += 1
    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 128, 256], stride=1)
    conv_num += 1
    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [1, 1, 256, 256], stride=1)
    conv_num += 1
    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 256, 512], stride=1)
    conv_num += 1
    temp_conv = self.max_pool(temp_conv, [2, 2], 2)

    # Four repeated 1x1 squeeze / 3x3 expand pairs (512 -> 256 -> 512).
    for i in range(4):
      temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [1, 1, 512, 256], stride=1)
      conv_num += 1
      temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 256, 512], stride=1)
      conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [1, 1, 512, 512], stride=1)
    conv_num += 1
    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 512, 1024], stride=1)
    conv_num += 1
    temp_conv = self.max_pool(temp_conv, [2, 2], 2)

    # Two repeated 1x1 squeeze / 3x3 expand pairs (1024 -> 512 -> 1024).
    for i in range(2):
      temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [1, 1, 1024, 512], stride=1)
      conv_num += 1
      temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 512, 1024], stride=1)
      conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
    conv_num += 1
    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=2)
    conv_num += 1
    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
    conv_num += 1
    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
    conv_num += 1

    # Fully connected layers; 49 * 1024 assumes a 7x7 final feature map.
    local1 = self.local('local1', temp_conv, 49 * 1024, 4096)
    local1 = tf.nn.dropout(local1, keep_prob=0.5)
    local2 = self.local('local2', local1, 4096, self.cell_size * self.cell_size * (self.num_classes + 5 * self.boxes_per_cell), leaky=False)

    local2 = tf.reshape(local2, [tf.shape(local2)[0], self.cell_size, self.cell_size, self.num_classes + 5 * self.boxes_per_cell])
    predicts = local2
    return predicts

  def iou(self, boxes1, boxes2):
    """calculate ious
    Args:
      boxes1: 4-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ====> (x_center, y_center, w, h)
      boxes2: 1-D tensor [4] ===> (x_center, y_center, w, h)
    Return:
      iou: 3-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    """
    # Convert both inputs from center/size to corner (x1, y1, x2, y2) form.
    # tf.pack was renamed tf.stack in TF 1.0; tf.stack is what the rest of
    # this file already uses.
    boxes1 = tf.stack([boxes1[:, :, :, 0] - boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] - boxes1[:, :, :, 3] / 2,
                       boxes1[:, :, :, 0] + boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] + boxes1[:, :, :, 3] / 2])
    boxes1 = tf.transpose(boxes1, [1, 2, 3, 0])
    boxes2 = tf.stack([boxes2[0] - boxes2[2] / 2, boxes2[1] - boxes2[3] / 2,
                       boxes2[0] + boxes2[2] / 2, boxes2[1] + boxes2[3] / 2])

    # calculate the left up point
    lu = tf.maximum(boxes1[:, :, :, 0:2], boxes2[0:2])
    rd = tf.minimum(boxes1[:, :, :, 2:], boxes2[2:])

    # intersection; negative extents mean the boxes do not overlap.
    intersection = rd - lu
    inter_square = intersection[:, :, :, 0] * intersection[:, :, :, 1]
    mask = tf.cast(intersection[:, :, :, 0] > 0, tf.float32) * tf.cast(intersection[:, :, :, 1] > 0, tf.float32)
    inter_square = mask * inter_square

    # calculate the boxs1 square and boxs2 square
    square1 = (boxes1[:, :, :, 2] - boxes1[:, :, :, 0]) * (boxes1[:, :, :, 3] - boxes1[:, :, :, 1])
    square2 = (boxes2[2] - boxes2[0]) * (boxes2[3] - boxes2[1])

    # 1e-6 guards against division by zero for degenerate boxes.
    return inter_square / (square1 + square2 - inter_square + 1e-6)

  def cond1(self, num, object_num, loss, predict, label, nilboy):
    """tf.while_loop condition: keep iterating while num < object_num."""
    return num < object_num

  def body1(self, num, object_num, loss, predict, labels, nilboy):
    """tf.while_loop body: accumulate the four loss terms for object `num`.

    Args:
      predict: 3-D tensor [cell_size, cell_size, num_classes + 5 * boxes_per_cell]
      labels : [max_objects, 5] (x_center, y_center, w, h, class)
    """
    label = labels[num:num + 1, :]
    label = tf.reshape(label, [-1])

    # calculate objects tensor [CELL_SIZE, CELL_SIZE]: the cells the ground
    # truth box overlaps, expressed in cell coordinates.
    min_x = (label[0] - label[2] / 2) / (self.image_size / self.cell_size)
    max_x = (label[0] + label[2] / 2) / (self.image_size / self.cell_size)
    min_y = (label[1] - label[3] / 2) / (self.image_size / self.cell_size)
    max_y = (label[1] + label[3] / 2) / (self.image_size / self.cell_size)

    min_x = tf.floor(min_x)
    min_y = tf.floor(min_y)
    max_x = tf.ceil(max_x)
    max_y = tf.ceil(max_y)

    # Ones over the covered cells, padded with zeros to the full grid.
    temp = tf.cast(tf.stack([max_y - min_y, max_x - min_x]), dtype=tf.int32)
    objects = tf.ones(temp, tf.float32)
    temp = tf.cast(tf.stack([min_y, self.cell_size - max_y, min_x, self.cell_size - max_x]), tf.int32)
    temp = tf.reshape(temp, (2, 2))
    objects = tf.pad(objects, temp, "CONSTANT")

    # calculate responsible tensor [CELL_SIZE, CELL_SIZE]: a single one at
    # the cell containing the box center.
    center_x = label[0] / (self.image_size / self.cell_size)
    center_x = tf.floor(center_x)
    center_y = label[1] / (self.image_size / self.cell_size)
    center_y = tf.floor(center_y)

    response = tf.ones([1, 1], tf.float32)
    temp = tf.cast(tf.stack([center_y, self.cell_size - center_y - 1, center_x, self.cell_size - center_x - 1]), tf.int32)
    temp = tf.reshape(temp, (2, 2))
    response = tf.pad(response, temp, "CONSTANT")
    #objects = response

    # calculate iou_predict_truth [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    predict_boxes = predict[:, :, self.num_classes + self.boxes_per_cell:]
    predict_boxes = tf.reshape(predict_boxes, [self.cell_size, self.cell_size, self.boxes_per_cell, 4])
    # Predicted x/y are cell-relative, w/h image-relative; rescale to pixels.
    predict_boxes = predict_boxes * [self.image_size / self.cell_size, self.image_size / self.cell_size, self.image_size, self.image_size]

    # Per-cell pixel offsets so cell-relative centers become absolute.
    base_boxes = np.zeros([self.cell_size, self.cell_size, 4])
    for y in range(self.cell_size):
      for x in range(self.cell_size):
        base_boxes[y, x, :] = [self.image_size / self.cell_size * x, self.image_size / self.cell_size * y, 0, 0]
    base_boxes = np.tile(np.resize(base_boxes, [self.cell_size, self.cell_size, 1, 4]), [1, 1, self.boxes_per_cell, 1])

    predict_boxes = base_boxes + predict_boxes

    iou_predict_truth = self.iou(predict_boxes, label[0:4])

    # calculate C [cell_size, cell_size, boxes_per_cell]: target confidence.
    C = iou_predict_truth * tf.reshape(response, [self.cell_size, self.cell_size, 1])

    # calculate I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]: indicator of
    # the best-IOU box in the responsible cell.
    # NOTE(review): keep_dims is the pre-TF-1.x spelling of keepdims.
    I = iou_predict_truth * tf.reshape(response, (self.cell_size, self.cell_size, 1))
    max_I = tf.reduce_max(I, 2, keep_dims=True)
    I = tf.cast((I >= max_I), tf.float32) * tf.reshape(response, (self.cell_size, self.cell_size, 1))

    # calculate no_I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    no_I = tf.ones_like(I, dtype=tf.float32) - I

    p_C = predict[:, :, self.num_classes:self.num_classes + self.boxes_per_cell]

    # calculate truth x, y, sqrt_w, sqrt_h (0-D); sqrt per the YOLO paper.
    x = label[0]
    y = label[1]
    sqrt_w = tf.sqrt(tf.abs(label[2]))
    sqrt_h = tf.sqrt(tf.abs(label[3]))
    #sqrt_w = tf.abs(label[2])
    #sqrt_h = tf.abs(label[3])

    # calculate predict p_x, p_y, p_sqrt_w, p_sqrt_h 3-D [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    p_x = predict_boxes[:, :, :, 0]
    p_y = predict_boxes[:, :, :, 1]
    #p_sqrt_w = tf.sqrt(tf.abs(predict_boxes[:, :, :, 2])) * ((tf.cast(predict_boxes[:, :, :, 2] > 0, tf.float32) * 2) - 1)
    #p_sqrt_h = tf.sqrt(tf.abs(predict_boxes[:, :, :, 3])) * ((tf.cast(predict_boxes[:, :, :, 3] > 0, tf.float32) * 2) - 1)
    #p_sqrt_w = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 2]))
    #p_sqrt_h = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 3]))
    #p_sqrt_w = predict_boxes[:, :, :, 2]
    #p_sqrt_h = predict_boxes[:, :, :, 3]
    # Clamp predicted sizes to [0, image_size] before sqrt to avoid NaNs.
    p_sqrt_w = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 2])))
    p_sqrt_h = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 3])))

    # calculate truth p 1-D tensor [NUM_CLASSES]
    P = tf.one_hot(tf.cast(label[4], tf.int32), self.num_classes, dtype=tf.float32)

    # calculate predict p_P 3-D tensor [CELL_SIZE, CELL_SIZE, NUM_CLASSES]
    p_P = predict[:, :, 0:self.num_classes]

    # class_loss: only cells covered by the object contribute.
    class_loss = tf.nn.l2_loss(tf.reshape(objects, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale
    #class_loss = tf.nn.l2_loss(tf.reshape(response, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale

    # object_loss: confidence of the responsible box vs its IOU.
    object_loss = tf.nn.l2_loss(I * (p_C - C)) * self.object_scale
    #object_loss = tf.nn.l2_loss(I * (p_C - (C + 1.0)/2.0)) * self.object_scale

    # noobject_loss: push non-responsible confidences toward zero.
    #noobject_loss = tf.nn.l2_loss(no_I * (p_C - C)) * self.noobject_scale
    noobject_loss = tf.nn.l2_loss(no_I * (p_C)) * self.noobject_scale

    # coord_loss: center offsets normalised by cell size, sizes by image size.
    coord_loss = (tf.nn.l2_loss(I * (p_x - x) / (self.image_size / self.cell_size)) +
                  tf.nn.l2_loss(I * (p_y - y) / (self.image_size / self.cell_size)) +
                  tf.nn.l2_loss(I * (p_sqrt_w - sqrt_w)) / self.image_size +
                  tf.nn.l2_loss(I * (p_sqrt_h - sqrt_h)) / self.image_size) * self.coord_scale

    nilboy = I

    return num + 1, object_num, [loss[0] + class_loss, loss[1] + object_loss, loss[2] + noobject_loss, loss[3] + coord_loss], predict, labels, nilboy

  def loss(self, predicts, labels, objects_num):
    """Add Loss to all the trainable variables.

    Args:
      predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
        ===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
      labels : 3-D tensor of [batch_size, max_objects, 5]
      objects_num: 1-D tensor [batch_size]
    """
    class_loss = tf.constant(0, tf.float32)
    object_loss = tf.constant(0, tf.float32)
    noobject_loss = tf.constant(0, tf.float32)
    coord_loss = tf.constant(0, tf.float32)
    loss = [0, 0, 0, 0]
    for i in range(self.batch_size):
      predict = predicts[i, :, :, :]
      label = labels[i, :, :]
      object_num = objects_num[i]
      nilboy = tf.ones([7, 7, 2])
      # Iterate over the ground-truth objects of this sample, summing the
      # four loss terms.
      tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
      for j in range(4):
        loss[j] = loss[j] + tuple_results[2][j]
      nilboy = tuple_results[5]

    tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3]) / self.batch_size)

    # NOTE(review): tf.scalar_summary is the pre-TF-1.0 API
    # (tf.summary.scalar in 1.0+); kept for the TF version this targets.
    tf.scalar_summary('class_loss', loss[0] / self.batch_size)
    tf.scalar_summary('object_loss', loss[1] / self.batch_size)
    tf.scalar_summary('noobject_loss', loss[2] / self.batch_size)
    tf.scalar_summary('coord_loss', loss[3] / self.batch_size)
    tf.scalar_summary('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3]) / self.batch_size)

    return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
wdv4758h/ZipPy | refs/heads/master | edu.uci.python.benchmark/src/benchmarks/sympy/sympy/core/evaluate.py | 2 | from contextlib import contextmanager
# Single-element list so the flag can be mutated in place by the context
# manager below and observed by code holding a reference to it.
global_evaluate = [True]


@contextmanager
def evaluate(x):
    """ Control automatic evaluation

    This context managers controls whether or not all SymPy functions evaluate
    by default.

    Note that much of SymPy expects evaluated expressions.  This functionality
    is experimental and is unlikely to function as intended on large
    expressions.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.core.evaluate import evaluate
    >>> print(x + x)
    2*x
    >>> with evaluate(False):
    ...     print(x + x)
    x + x
    """
    old = global_evaluate[0]
    global_evaluate[0] = x
    try:
        yield
    finally:
        # Restore the previous setting even when the body raises; the
        # original version left the flag stuck at `x` on exceptions.
        global_evaluate[0] = old
|
moorebrett0/node-jira-client | refs/heads/master | Sites/BusinesstasticBlitz/html/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py | 1534 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False

# Populated by CalculateGeneratorInputInfo().
generator_filelist_paths = {
}

# This generator only dumps the dependency graph, so the path variables can
# be placeholders; they are filled in below.
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
  """Seed |default_variables| with generator flags and the OS flavor, plus
  (on Windows) the shared VS/ninja generator configuration."""
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))

  flavor = gyp.common.GetFlavor(params)
  if flavor =='win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): the two assignments below bind *local* names only; if
    # they were meant to update the module-level globals of the same name
    # they have no effect here.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True

  options = params['options']
  toplevel = options.toplevel_dir
  generator_dir = os.path.relpath(options.generator_output or '.')
  # output_dir: relative path from generator_dir to the build directory.
  output_dir = flags.get('output_dir', 'out')
  qualified_out_dir = os.path.normpath(
      os.path.join(toplevel, generator_dir, output_dir, 'gypfiles'))

  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': qualified_out_dir,
  }
def GenerateOutput(target_list, target_dicts, data, params):
  """Dump the transitive dependency edges of all targets as JSON.

  Writes {target: [direct dependencies]} for every target reachable from
  |target_list| to <output_dir>/dump.json.
  """
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while targets_to_visit:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  try:
    filepath = params['generator_flags']['output_dir']
  except KeyError:
    filepath = '.'
  filename = os.path.join(filepath, 'dump.json')
  # Context manager closes the file even if json.dump raises (the original
  # leaked the handle in that case).
  with open(filename, 'w') as f:
    json.dump(edges, f)
  # Parenthesized form prints the same string under both Python 2 and 3.
  print('Wrote json to %s.' % filename)
|
soldag/home-assistant | refs/heads/dev | homeassistant/components/rfxtrx/switch.py | 11 | """Support for RFXtrx switches."""
import logging
import RFXtrx as rfxtrxmod
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import CONF_DEVICES, STATE_ON
from homeassistant.core import callback
from . import (
CONF_DATA_BITS,
CONF_SIGNAL_REPETITIONS,
DEFAULT_SIGNAL_REPETITIONS,
DOMAIN,
RfxtrxCommandEntity,
connect_auto_add,
get_device_id,
get_rfx_object,
)
from .const import COMMAND_OFF_LIST, COMMAND_ON_LIST
# Key for this platform's data, namespaced by the integration domain.
DATA_SWITCH = f"{DOMAIN}_switch"

_LOGGER = logging.getLogger(__name__)
def supported(event):
    """Return whether an event supports switch."""
    device = event.device
    is_simple_light = (
        isinstance(device, rfxtrxmod.LightingDevice)
        and not device.known_to_be_dimmable
        and not device.known_to_be_rollershutter
    )
    return is_simple_light or isinstance(device, rfxtrxmod.RfyDevice)
async def async_setup_entry(
    hass,
    config_entry,
    async_add_entities,
):
    """Set up config entry.

    Creates one RfxtrxSwitch per configured device, then registers a
    callback so switches discovered later on the gateway are added too.
    """
    discovery_info = config_entry.data
    # Tracks which devices already have an entity, across both the initial
    # pass and later runtime discovery.
    device_ids = set()

    # Add switch from config file
    entities = []
    for packet_id, entity_info in discovery_info[CONF_DEVICES].items():
        event = get_rfx_object(packet_id)
        if event is None:
            _LOGGER.error("Invalid device: %s", packet_id)
            continue
        if not supported(event):
            continue

        device_id = get_device_id(
            event.device, data_bits=entity_info.get(CONF_DATA_BITS)
        )
        if device_id in device_ids:
            continue
        device_ids.add(device_id)

        entity = RfxtrxSwitch(
            event.device, device_id, entity_info[CONF_SIGNAL_REPETITIONS]
        )
        entities.append(entity)

    async_add_entities(entities)

    @callback
    def switch_update(event, device_id):
        """Handle sensor updates from the RFXtrx gateway."""
        if not supported(event):
            return

        if device_id in device_ids:
            return
        device_ids.add(device_id)

        _LOGGER.info(
            "Added switch (Device ID: %s Class: %s Sub: %s, Event: %s)",
            event.device.id_string.lower(),
            event.device.__class__.__name__,
            event.device.subtype,
            "".join(f"{x:02x}" for x in event.data),
        )

        # Runtime-discovered entities use the default repetition count and
        # carry the triggering event as their initial state.
        entity = RfxtrxSwitch(
            event.device, device_id, DEFAULT_SIGNAL_REPETITIONS, event=event
        )
        async_add_entities([entity])

    # Subscribe to main RFXtrx events
    connect_auto_add(hass, discovery_info, switch_update)
class RfxtrxSwitch(RfxtrxCommandEntity, SwitchEntity):
    """Representation of a RFXtrx switch."""

    async def async_added_to_hass(self):
        """Restore the last known state when no live event is available."""
        await super().async_added_to_hass()

        if self._event is not None:
            return
        old_state = await self.async_get_last_state()
        if old_state is not None:
            self._state = old_state.state == STATE_ON

    def _apply_event(self, event):
        """Apply command from rfxtrx."""
        super()._apply_event(event)
        command = event.values["Command"]
        if command in COMMAND_ON_LIST:
            self._state = True
        elif command in COMMAND_OFF_LIST:
            self._state = False

    @callback
    def _handle_event(self, event, device_id):
        """Check if event applies to me and update."""
        if device_id == self._device_id:
            self._apply_event(event)
            self.async_write_ha_state()

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        await self._async_send(self._device.send_on)
        self._state = True
        self.async_write_ha_state()

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        await self._async_send(self._device.send_off)
        self._state = False
        self.async_write_ha_state()
|
tttthemanCorp/CardmeleonAppEngine | refs/heads/master | django/conf/locale/sr_Latn/formats.py | 655 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Serbian (Latin script) locale formats.
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y.', '%d.%m.%y.',     # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
    '%Y-%m-%d',                   # '2006-10-25'
    # '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
    # '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
    # '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M',    # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y. %H:%M:%S',   # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M',      # '25.10.2006. 14:30'
    '%d.%m.%Y.',            # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',   # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M',      # '25.10.06. 14:30'
    '%d.%m.%y.',            # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M',    # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',          # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M',    # '25. 10. 06. 14:30'
    '%d. %m. %y.',          # '25. 10. 06.'
    '%Y-%m-%d %H:%M:%S',    # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',       # '2006-10-25 14:30'
    '%Y-%m-%d',             # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
lmorchard/django | refs/heads/master | tests/sitemaps_tests/base.py | 380 | from django.apps import apps
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.test import TestCase, modify_settings, override_settings
from .models import I18nTestModel, TestModel
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sitemaps'})
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.http')
class SitemapTestsBase(TestCase):
    """Shared fixture/configuration for the sitemap test cases."""
    protocol = 'http'
    sites_installed = apps.is_installed('django.contrib.sites')
    domain = 'example.com' if sites_installed else 'testserver'

    def setUp(self):
        self.base_url = '{0}://{1}'.format(self.protocol, self.domain)
        cache.clear()
        # Create an object for sitemap content.
        TestModel.objects.create(name='Test Object')
        self.i18n_model = I18nTestModel.objects.create(name='Test Object')

    @classmethod
    def setUpClass(cls):
        super(SitemapTestsBase, cls).setUpClass()
        # contrib.sites caches Site objects, which makes tests interfere
        # with each other (see #11505); start each class with a clean cache.
        Site.objects.clear_cache()
|
smartfile/django-1.4 | refs/heads/master | django/db/backends/oracle/creation.py | 29 | import sys
import time
from django.db.backends.creation import BaseDatabaseCreation
# Prefix applied to NAME/USER/tablespace names when deriving test defaults.
TEST_DATABASE_PREFIX = 'test_'
# Default password for the auto-created Oracle test user.
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
    """Oracle-specific test setup/teardown.

    Oracle has no lightweight CREATE DATABASE, so the "test database" is
    modelled as a dedicated user plus a pair of tablespaces.
    """
    # This dictionary maps Field objects to their associated Oracle column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed.
    data_types = {
        'AutoField': 'NUMBER(11)',
        'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
        'CharField': 'NVARCHAR2(%(max_length)s)',
        'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
        'DateField': 'DATE',
        'DateTimeField': 'TIMESTAMP',
        'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'NVARCHAR2(%(max_length)s)',
        'FilePathField': 'NVARCHAR2(%(max_length)s)',
        'FloatField': 'DOUBLE PRECISION',
        'IntegerField': 'NUMBER(11)',
        'BigIntegerField': 'NUMBER(19)',
        'IPAddressField': 'VARCHAR2(15)',
        'GenericIPAddressField': 'VARCHAR2(39)',
        'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
        'OneToOneField': 'NUMBER(11)',
        'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'SlugField': 'NVARCHAR2(%(max_length)s)',
        'SmallIntegerField': 'NUMBER(11)',
        'TextField': 'NCLOB',
        'TimeField': 'TIMESTAMP',
        'URLField': 'VARCHAR2(%(max_length)s)',
    }
    def __init__(self, connection):
        # Delegates straight to the base class; kept for explicitness.
        super(DatabaseCreation, self).__init__(connection)
    def _create_test_db(self, verbosity=1, autoclobber=False):
        """Create the test tablespaces and test user, prompting (or, with
        autoclobber, destroying) when they already exist.

        Returns the value of settings_dict['NAME']; the "database" itself
        is represented by the user/tablespace pair.
        """
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        cursor = self.connection.cursor()
        if self._test_database_create():
            try:
                self._execute_test_db_creation(cursor, parameters, verbosity)
            except Exception, e:
                # Creation failed, most likely because it already exists;
                # ask (unless autoclobber) before destroying and retrying.
                sys.stderr.write("Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
                # When autoclobber is set, `confirm` is never read
                # (short-circuit evaluation).
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test database '%s'..." % self.connection.alias
                        self._execute_test_db_destruction(cursor, parameters, verbosity)
                        self._execute_test_db_creation(cursor, parameters, verbosity)
                    except Exception, e:
                        sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)

        if self._test_user_create():
            if verbosity >= 1:
                print "Creating test user..."
            try:
                self._create_test_user(cursor, parameters, verbosity)
            except Exception, e:
                # Same recover-or-abort dance for a pre-existing test user.
                sys.stderr.write("Got an error creating the test user: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test user..."
                        self._destroy_test_user(cursor, parameters, verbosity)
                        if verbosity >= 1:
                            print "Creating test user..."
                        self._create_test_user(cursor, parameters, verbosity)
                    except Exception, e:
                        sys.stderr.write("Got an error recreating the test user: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)

        # Remember the production credentials so _destroy_test_db() can
        # restore them, then switch the connection to the test user.
        self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
        self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
        self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
        self.connection.settings_dict['PASSWORD'] = TEST_PASSWD

        return self.connection.settings_dict['NAME']
    def _destroy_test_db(self, test_database_name, verbosity=1):
        """
        Destroy a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.

        NOTE(review): the `test_database_name` parameter is unused here; the
        names are recomputed from settings instead.
        """
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        # Restore the production credentials saved by _create_test_db().
        self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
        self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        cursor = self.connection.cursor()
        time.sleep(1) # To avoid "database is being accessed by other users" errors.
        if self._test_user_create():
            if verbosity >= 1:
                print 'Destroying test user...'
            self._destroy_test_user(cursor, parameters, verbosity)
        if self._test_database_create():
            if verbosity >= 1:
                print 'Destroying test database tables...'
            self._execute_test_db_destruction(cursor, parameters, verbosity)
        self.connection.close()
    def _execute_test_db_creation(self, cursor, parameters, verbosity):
        """Create the permanent and temporary test tablespaces."""
        if verbosity >= 2:
            print "_create_test_db(): dbname = %s" % parameters['dbname']
        statements = [
            """CREATE TABLESPACE %(tblspace)s
            DATAFILE '%(tblspace)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
            """,
            """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
            TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
            """,
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _create_test_user(self, cursor, parameters, verbosity):
        """Create the test user with quota on the test tablespaces and the
        CONNECT/RESOURCE roles."""
        if verbosity >= 2:
            print "_create_test_user(): username = %s" % parameters['user']
        statements = [
            """CREATE USER %(user)s
            IDENTIFIED BY %(password)s
            DEFAULT TABLESPACE %(tblspace)s
            TEMPORARY TABLESPACE %(tblspace_temp)s
            QUOTA UNLIMITED ON %(tblspace)s
            """,
            """GRANT CONNECT, RESOURCE TO %(user)s""",
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _execute_test_db_destruction(self, cursor, parameters, verbosity):
        """Drop both test tablespaces, including their datafiles."""
        if verbosity >= 2:
            print "_execute_test_db_destruction(): dbname=%s" % parameters['dbname']
        statements = [
            'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
            'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _destroy_test_user(self, cursor, parameters, verbosity):
        """Drop the test user and, via CASCADE, all of its schema objects."""
        if verbosity >= 2:
            print "_destroy_test_user(): user=%s" % parameters['user']
            print "Be patient.  This can take some time..."
        statements = [
            'DROP USER %(user)s CASCADE',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
    # Interpolate 'parameters' into each SQL template and execute it.
    # NOTE(review): plain %-interpolation, not bind variables -- acceptable
    # only because the values come from trusted settings, never user input.
    for template in statements:
        stmt = template % parameters
        if verbosity >= 2:
            print stmt
        try:
            cursor.execute(stmt)
        except Exception, err:
            # Report which statement failed before propagating the error.
            sys.stderr.write("Failed (%s)\n" % (err))
            raise
def _test_database_name(self):
    """
    Return the name of the test database.

    Uses the TEST_NAME setting when present and truthy, otherwise the
    production NAME prefixed with TEST_DATABASE_PREFIX.
    """
    name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
    try:
        if self.connection.settings_dict['TEST_NAME']:
            name = self.connection.settings_dict['TEST_NAME']
    except KeyError:
        # Bug fix: settings_dict is a dict, so a missing key raises
        # KeyError, not AttributeError.  Every sibling getter here
        # (_test_database_user, _test_database_passwd, ...) already
        # catches KeyError; previously a missing TEST_NAME escaped.
        pass
    return name
def _test_database_create(self):
return self.connection.settings_dict.get('TEST_CREATE', True)
def _test_user_create(self):
return self.connection.settings_dict.get('TEST_USER_CREATE', True)
def _test_database_user(self):
    # Test username: TEST_USER setting when present/truthy, otherwise the
    # production USER prefixed with TEST_DATABASE_PREFIX.
    name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
    try:
        if self.connection.settings_dict['TEST_USER']:
            name = self.connection.settings_dict['TEST_USER']
    except KeyError:
        pass
    return name
def _test_database_passwd(self):
    # Test password: TEST_PASSWD setting when present/truthy, otherwise the
    # module-level PASSWORD default.
    name = PASSWORD
    try:
        if self.connection.settings_dict['TEST_PASSWD']:
            name = self.connection.settings_dict['TEST_PASSWD']
    except KeyError:
        pass
    return name
def _test_database_tblspace(self):
    # Data tablespace name: TEST_TBLSPACE setting when present/truthy,
    # otherwise TEST_DATABASE_PREFIX + the production NAME.
    name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
    try:
        if self.connection.settings_dict['TEST_TBLSPACE']:
            name = self.connection.settings_dict['TEST_TBLSPACE']
    except KeyError:
        pass
    return name
def _test_database_tblspace_tmp(self):
    # Temporary tablespace name: TEST_TBLSPACE_TMP setting when present/
    # truthy, otherwise the prefixed production NAME plus a '_temp' suffix.
    name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
    try:
        if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
            name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
    except KeyError:
        pass
    return name
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a great deal in this case because DB
names as handled by Django haven't real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
    # Tuple uniquely identifying this test database configuration; aliases
    # with equal signatures can share a single test database.
    settings_dict = self.connection.settings_dict
    return (
        settings_dict['HOST'],
        settings_dict['PORT'],
        settings_dict['ENGINE'],
        settings_dict['NAME'],
        self._test_database_user(),
    )
def set_autocommit(self):
    # Enable autocommit on the raw driver connection.
    # NOTE(review): assumes the underlying DB-API connection exposes an
    # 'autocommit' attribute -- confirm against the Oracle driver in use.
    self.connection.connection.autocommit = True
|
Ahmad31/Web_Flask_Cassandra | refs/heads/master | flask/lib/python2.7/site-packages/whoosh/automata/lev.py | 52 | from __future__ import print_function
from whoosh.compat import unichr, xrange
from whoosh.automata.fsa import ANY, EPSILON, NFA, unull
def levenshtein_automaton(term, k, prefix=0):
    """Build an NFA matching strings within Levenshtein distance ``k`` of
    ``term``, where the first ``prefix`` characters must match exactly.

    States are ``(position_in_term, errors_so_far)`` tuples starting at
    ``(0, 0)``; a state is final once the whole term has been consumed
    with at most ``k`` errors.
    """
    nfa = NFA((0, 0))
    if prefix:
        # Exact-match region: only literal transitions, error count stays 0.
        for i in xrange(prefix):
            c = term[i]
            nfa.add_transition((i, 0), c, (i + 1, 0))
    for i in xrange(prefix, len(term)):
        c = term[i]
        for e in xrange(k + 1):
            # Correct character
            nfa.add_transition((i, e), c, (i + 1, e))
            if e < k:
                # Deletion
                nfa.add_transition((i, e), ANY, (i, e + 1))
                # Insertion
                nfa.add_transition((i, e), EPSILON, (i + 1, e + 1))
                # Substitution
                nfa.add_transition((i, e), ANY, (i + 1, e + 1))
    for e in xrange(k + 1):
        if e < k:
            # Extra input characters after the term is fully consumed.
            nfa.add_transition((len(term), e), ANY, (len(term), e + 1))
        nfa.add_final_state((len(term), e))
    return nfa
|
tylerlaberge/PyPattyrn | refs/heads/master | pypattyrn/behavioral/visitor.py | 1 | from abc import ABCMeta, abstractmethod
class Visitor(metaclass=ABCMeta):
    """
    Abstract Visitor class as part of the Visitor Design Pattern.

    - External Usage documentation: U{https://github.com/tylerlaberge/PyPattyrn#visitor-pattern}
    - External Visitor Design Pattern documentation: U{https://en.wikipedia.org/wiki/Visitor_pattern}
    """
    def visit(self, node, *args, **kwargs):
        """
        Dispatch *node* to the most specific handler available.

        Walks the MRO of node's class and invokes the first matching
        ``visit_<lowercased class name>`` method; falls back to
        ``generic_visit`` when no class-specific handler exists.

        @param node: An object to call a visitor method with.
        @param args: Arguments to go with the visitor method call.
        @param kwargs: Keyword arguments to go with the visitor method call.
        @return: The return value of the handler that was invoked.
        """
        handler = None
        for klass in node.__class__.__mro__:
            candidate = getattr(self, 'visit_' + klass.__name__.lower(), None)
            if candidate:
                handler = candidate
                break
        if not handler:
            handler = self.generic_visit
        return handler(node, *args, **kwargs)

    @abstractmethod
    def generic_visit(self, node, *args, **kwargs):
        """
        Fallback handler invoked when no visit_<classname> method matches.

        @param node: An object to call a visitor method with.
        @param args: Arguments to go with the visitor method call.
        @param kwargs: Keyword arguments to go with the visitor method call.
        """
class Visitee(object):
    """
    Base class for objects that can be visited by a Visitor instance.

    - External Usage documentation: U{https://github.com/tylerlaberge/PyPattyrn#behavioral-patterns}
    - External Visitor Design Pattern documentation: U{https://en.wikipedia.org/wiki/Visitor_pattern}
    """
    def accept(self, visitor, *args, **kwargs):
        """
        Ask *visitor* to visit this instance (double dispatch entry point).

        @param visitor: The visitor to visit.
        @type visitor: Visitor
        @param args: Any args to send with the visit.
        @param kwargs: Any kwargs to send with the visit.
        @return: Whatever the visitor's visit() returns.
        """
        return visitor.visit(self, *args, **kwargs)
|
TangHao1987/intellij-community | refs/heads/master | python/lib/Lib/encodings/utf_7.py | 116 | """ Python 'utf-7' Codec
Written by Brian Quinlan (brian@sweetapp.com).
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless UTF-7 codec delegating to the interpreter's C encode/decode."""
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.utf_7_encode
    decode = codecs.utf_7_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input* as UTF-7 bytes; *final* is unused because the
        encoder keeps no state between calls."""
        encoded, _consumed = codecs.utf_7_encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    def _buffer_decode(self, input, errors, final):
        """Decode buffered UTF-7 bytes, returning (decoded, consumed).

        Bug fix: forward *errors* and *final* to the C codec.  Previously
        both were dropped (self.errors was substituted and final defaulted
        to False), so a trailing partial shift sequence was never flushed
        when the caller signalled final=True.
        """
        return codecs.utf_7_decode(input, errors, final)
class StreamWriter(Codec,codecs.StreamWriter):
    # Plain mixin: Codec supplies encode, StreamWriter the stream plumbing.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Plain mixin: Codec supplies decode, StreamReader the stream plumbing.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo that registers this module as the 'utf-7' codec."""
    return codecs.CodecInfo(
        name='utf-7',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
kool79/intellij-community | refs/heads/master | python/testData/inspections/PyUnresolvedReferencesInspection/builtinDerivedClassAttribute.py | 83 | class C(dict):
def foo(self):
pass
def bar(self):
self.foo() #pass |
zhenzhai/edx-platform | refs/heads/master | common/djangoapps/static_replace/__init__.py | 8 | import logging
import re
from django.contrib.staticfiles.storage import staticfiles_storage
from django.contrib.staticfiles import finders
from django.conf import settings
from static_replace.models import AssetBaseUrlConfig, AssetExcludedExtensionsConfig
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.contentstore.content import StaticContent
from opaque_keys.edx.locator import AssetLocator
log = logging.getLogger(__name__)
XBLOCK_STATIC_RESOURCE_PREFIX = '/static/xblock'
def _url_replace_regex(prefix):
    """
    Match static urls in quotes that don't end in '?raw'.

    To anyone contemplating making this more complicated:
    http://xkcd.com/1171/
    """
    # The returned pattern is verbose-mode ((?x)); the comments inside the
    # string below are part of the regex source itself.
    return ur"""
        (?x)                 # flags=re.VERBOSE
        (?P<quote>\\?['"])   # the opening quotes
        (?P<prefix>{prefix}) # the prefix
        (?P<rest>.*?)        # everything else in the url
        (?P=quote)           # the first matching closing quote
        """.format(prefix=prefix)
def try_staticfiles_lookup(path):
    """
    Look up ``path`` in staticfiles_storage, degrading gracefully.

    On any storage error the original path is returned (yielding a dead
    link) instead of letting the exception kill page rendering.
    """
    try:
        return staticfiles_storage.url(path)
    except Exception as err:
        log.warning("staticfiles_storage couldn't find path {0}: {1}".format(
            path, str(err)))
        # Fall back to the raw path so the rest of the page still renders.
        return path
def replace_jump_to_id_urls(text, course_id, jump_to_id_base_url):
    """
    Rewrite /jump_to_id/<id> links into app-level 'jump_to' redirect URLs.

    Similar in spirit to replace_course_urls, but driven only by the module
    'id' the course author supplies.  That makes it friendlier for Studio
    authored courses (no path knowledge needed) and durable across item
    moves.

    text: the content to rewrite
    course_id: the course in which this rewrite happens
    jump_to_id_base_url: absolute app-tier base path of the redirect
        handler, e.g. /courses/<org>/<course>/<run>/jump_to_id; the <id> is
        appended at rewrite time
    returns: text with the links rewritten
    """
    def rewrite(match):
        # Reassemble the quoted url with the redirect base spliced in.
        quote = match.group('quote')
        rest = match.group('rest')
        return quote + jump_to_id_base_url + rest + quote
    return re.sub(_url_replace_regex('/jump_to_id/'), rewrite, text)
def replace_course_urls(text, course_key):
    """
    Rewrite /course/$stuff urls as /courses/$course_id/$stuff urls.

    text: the text to rewrite
    course_key: course key whose deprecated string form is spliced into
        the rewritten urls
    returns: text with the links rewritten
    """
    course_id = course_key.to_deprecated_string()
    def rewrite(match):
        quote = match.group('quote')
        rest = match.group('rest')
        return quote + '/courses/' + course_id + '/' + rest + quote
    return re.sub(_url_replace_regex('/course/'), rewrite, text)
def process_static_urls(text, replacement_function, data_dir=None):
    """
    Run an arbitrary replacement function on any urls matching the static file
    directory.

    text: the source text to rewrite
    replacement_function: callable(original, prefix, quote, rest) -> str
    data_dir: course data directory excluded from matching (those urls are
        already course-relative)
    """
    def wrap_part_extraction(match):
        """
        Unwraps a match group for the captures specified in _url_replace_regex
        and forward them on as function arguments
        """
        original = match.group(0)
        prefix = match.group('prefix')
        quote = match.group('quote')
        rest = match.group('rest')
        # Don't rewrite XBlock resource links. Probably wasn't a good idea that /static
        # works for actual static assets and for magical course asset URLs....
        full_url = prefix + rest
        if full_url.startswith(XBLOCK_STATIC_RESOURCE_PREFIX):
            return original
        return replacement_function(original, prefix, quote, rest)
    return re.sub(
        # Match either the deployed STATIC_URL or the literal '/static/',
        # but not when immediately followed by data_dir (negative lookahead).
        _url_replace_regex(u'(?:{static_url}|/static/)(?!{data_dir})'.format(
            static_url=settings.STATIC_URL,
            data_dir=data_dir
        )),
        wrap_part_extraction,
        text
    )
def make_static_urls_absolute(request, html):
    """
    Convert relative static-asset URLs in ``html`` into absolute URLs
    rooted at the requesting host.
    """
    def absolutize(__, prefix, quote, rest):
        # Single relative -> absolute replacement against the request host.
        return quote + request.build_absolute_uri(prefix + rest) + quote
    return process_static_urls(html, absolutize)
def replace_static_urls(text, data_directory=None, course_id=None, static_asset_path=''):
    """
    Replace /static/$stuff urls either with their correct url as generated by collectstatic,
    (/static/$md5_hashed_stuff) or by the course-specific content static url
    /static/$course_data_dir/$stuff, or, if course_namespace is not None, by the
    correct url in the contentstore (/c4x/.. or /asset-loc:..)

    text: The source text to do the substitution in
    data_directory: The directory in which course data is stored
    course_id: The course identifier used to distinguish static content for this course in studio
    static_asset_path: Path for static assets, which overrides data_directory and course_namespace, if nonempty
    """
    def replace_static_url(original, prefix, quote, rest):
        """
        Replace a single matched url.
        """
        # Don't mess with things that end in '?raw'
        if rest.endswith('?raw'):
            return original
        # In debug mode, if we can find the url as is,
        if settings.DEBUG and finders.find(rest, True):
            return original
        # if we're running with a MongoBacked store course_namespace is not None, then use studio style urls
        elif (not static_asset_path) and course_id:
            # first look in the static file pipeline and see if we are trying to reference
            # a piece of static content which is in the edx-platform repo (e.g. JS associated with an xmodule)
            exists_in_staticfiles_storage = False
            try:
                exists_in_staticfiles_storage = staticfiles_storage.exists(rest)
            except Exception as err:
                # Best-effort probe: a storage failure just means we fall
                # through to the course-content path below.
                log.warning("staticfiles_storage couldn't find path {0}: {1}".format(
                    rest, str(err)))
            if exists_in_staticfiles_storage:
                url = staticfiles_storage.url(rest)
            else:
                # if not, then assume it's courseware specific content and then look in the
                # Mongo-backed database
                base_url = AssetBaseUrlConfig.get_base_url()
                excluded_exts = AssetExcludedExtensionsConfig.get_excluded_extensions()
                url = StaticContent.get_canonicalized_asset_path(course_id, rest, base_url, excluded_exts)
                if AssetLocator.CANONICAL_NAMESPACE in url:
                    # Normalize the first 'block@' to 'block/' for
                    # new-style (asset-v1) locator urls.
                    url = url.replace('block@', 'block/', 1)
        # Otherwise, look the file up in staticfiles_storage, and append the data directory if needed
        else:
            course_path = "/".join((static_asset_path or data_directory, rest))
            try:
                if staticfiles_storage.exists(rest):
                    url = staticfiles_storage.url(rest)
                else:
                    url = staticfiles_storage.url(course_path)
            # And if that fails, assume that it's course content, and add manually data directory
            except Exception as err:
                log.warning("staticfiles_storage couldn't find path {0}: {1}".format(
                    rest, str(err)))
                url = "".join([prefix, course_path])
        return "".join([quote, url, quote])
    return process_static_urls(text, replace_static_url, data_dir=static_asset_path or data_directory)
|
wileeam/airflow | refs/heads/master | airflow/operators/hive_to_mysql.py | 4 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.apache.hive.operators.hive_to_mysql`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.hive.operators.hive_to_mysql import HiveToMySqlTransfer # noqa
# Emit the deprecation warning once at import time; stacklevel=2 attributes
# the warning to the module doing the import rather than to this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.apache.hive.operators.hive_to_mysql`.",
    DeprecationWarning, stacklevel=2
)
|
cyberphox/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/distutils/command/install_data.py | 112 | import sys
have_setuptools = ('setuptools' in sys.modules)
from distutils.command.install_data import install_data as old_install_data
#data installer with improved intelligence over distutils
#data files are copied into the project directory instead
#of willy-nilly
class install_data (old_install_data):
    # Subclass of distutils' install_data that redirects data files into the
    # installed package directory (install_lib) instead of the prefix root.

    def run(self):
        old_install_data.run(self)
        if have_setuptools:
            # Run install_clib again, since setuptools does not run sub-commands
            # of install automatically
            self.run_command('install_clib')

    def finalize_options (self):
        # Inherit unset options from the parent 'install' command; mapping
        # install_lib -> install_dir is what makes data land next to the code.
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'),
                                   ('root', 'root'),
                                   ('force', 'force'),
                                  )
|
tumi8/sKnock | refs/heads/master | server/benchmarks/ap_firewall.py | 1 | import logging
import os
import time
from definitions.Constants import *
from server.modules.Configuration import config, initialize
from server.modules.Firewall.Firewall import Firewall
LOG = logging.getLogger(__name__)
def benchmark(ipv4 = True, ipv6 = False, tcp = True, udp = False, csvOutput = '/tmp'):
    """Benchmark per-operation open/close latency of the sKnock firewall.

    For every enabled (IP version, protocol) combination this sweeps all
    65536 ports, first opening then closing them, and writes one CSV per
    sweep into csvOutput mapping current ruleset size to the latency of a
    single operation in milliseconds.

    WARNING: total time is only an approximation obtained by summing the
    single operation times and subtracting the estimated time wasted on
    timing the functions themselves.
    """
    initialize()
    config.PORT_OPEN_DURATION_IN_SECONDS = 100
    config.firewallPolicy = 'none'
    firewallHandler = Firewall(config)
    firewallHandler.startup()

    # Calibration: estimate the overhead of the time.time() calls so it can
    # be subtracted from every measured operation (keep the minimum of 5 runs).
    timingCost = 0
    for _ in xrange(5):
        prev_time = time.time()
        for _ in xrange(10000000):
            currTime = time.time()
        newTimingCost = (currTime - prev_time) * 1000
        timingCost = min(timingCost, newTimingCost) if timingCost != 0 else newTimingCost
    perTimingError = timingCost / 10000000
    LOG.info('Calculated accumulated timing cost: %s ms per 10m Operations', round(timingCost, 4))

    # (enabled, family key, proto name, ip version, protocol, target address,
    #  CSV header label, log label) -- order matches the original sweeps.
    combos = [
        (ipv4 and tcp, '4', 'tcp', IP_VERSION.V4, PROTOCOL.TCP, '1.1.1.1', 'IPv4/TCP', 'IPv4 TCP'),
        (ipv4 and udp, '4', 'udp', IP_VERSION.V4, PROTOCOL.UDP, '1.1.1.1', 'IPv4/UDP', 'IPv4 UDP'),
        (ipv6 and tcp, '6', 'tcp', IP_VERSION.V6, PROTOCOL.TCP, '1111::1', 'IPv6/TCP', 'IPv6 TCP'),
        (ipv6 and udp, '6', 'udp', IP_VERSION.V6, PROTOCOL.UDP, '1111::1', 'IPv6/UDP', 'IPv6 UDP'),
    ]
    # Rules accumulated per address family (shared across TCP and UDP sweeps).
    rulesetSizes = {'4': 0, '6': 0}

    # Open phase: each sweep grows the family's ruleset by 65536 rules.
    for enabled, fam, proto_name, ip_version, protocol, address, hdr, label in combos:
        if not enabled:
            continue
        csv_path = os.path.join(csvOutput, 'ap_firewall_rulesetsize_vs_operationtime_ipv%s_%s_open.csv' % (fam, proto_name))
        header = "Number of Rules in Chain, Time for a single %s port-open operation [ms]\n" % hdr
        total = _timed_sweep(firewallHandler.openPortForClient, ip_version, protocol, address,
                             csv_path, header, rulesetSizes[fam], 1, perTimingError)
        rulesetSizes[fam] += 65536
        LOG.info('Approximate total time for opening 65536 %s ports: %s', label, round(total, 2))

    # Close phase: each sweep shrinks the family's ruleset back down.
    for enabled, fam, proto_name, ip_version, protocol, address, hdr, label in combos:
        if not enabled:
            continue
        csv_path = os.path.join(csvOutput, 'ap_firewall_rulesetsize_vs_operationtime_ipv%s_%s_close.csv' % (fam, proto_name))
        header = "Number of Rules in Chain, Time for a single %s port-close operation [ms]\n" % hdr
        total = _timed_sweep(firewallHandler.closePortForClient, ip_version, protocol, address,
                             csv_path, header, rulesetSizes[fam], -1, perTimingError)
        rulesetSizes[fam] -= 65536
        LOG.info('Approximate total time for closing 65536 %s ports: %s', label, round(total, 2))
    # End Benchmark


def _timed_sweep(port_action, ip_version, protocol, address, csv_path, csv_header, base_size, sign, timing_error):
    """Time port_action over all 65536 ports and write one CSV row per
    operation ('<ruleset size>,<ms>', size moving by `sign` per port).
    Returns the summed, overhead-corrected time in ms."""
    approxTotalTime = 0
    baconFile = open(csv_path, 'w')
    baconFile.write(csv_header)
    for port in xrange(0, 65536):
        prev_time = time.time()
        port_action(port, ip_version, protocol, address)
        computationTime = (time.time() - prev_time) * 1000 - timing_error
        approxTotalTime += computationTime
        LOG.info('Computation Time for Operation: %f', computationTime)
        baconFile.write("%d,%s\n" % (base_size + sign * port, round(computationTime, 2)))
    baconFile.close()
    return approxTotalTime
if __name__ == '__main__':
    # Standalone entry point: log to /tmp and benchmark every combination
    # (IPv4 + IPv6, TCP + UDP).
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO, filename='/tmp/ap_firewall.log')
    benchmark(1,1,1,1)
CarGroup3/opendlv.scaledcars | refs/heads/master | thirdparty/cxxtest/python/python3/cxxtest/cxxtestgen.py | 48 | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
# vim: fileencoding=utf-8
# the above import important for forward-compatibility with python3,
# which is already the default in archlinux!
__all__ = ['main', 'create_manpage']
from . import __release__
import os
import sys
import re
import glob
from optparse import OptionParser
from . import cxxtest_parser
from string import Template
try:
from . import cxxtest_fog
imported_fog=True
except ImportError:
imported_fog=False
from .cxxtest_misc import abort
try:
from os.path import relpath
except ImportError:
from .cxxtest_misc import relpath
# Global data is initialized by main()
options = []        # parsed command-line options (OptionParser result)
suites = []         # test suites discovered in the input headers
wrotePreamble = 0   # guard: preamble is emitted at most once per run
wroteWorld = 0      # guard: world block is emitted at most once per run
lastIncluded = ''   # last header included while emitting the world
def main(args=sys.argv, catch=False):
    '''The main program: reset module state, scan inputs, emit the runner.'''
    #
    # Reset global state
    #
    global wrotePreamble
    wrotePreamble=0
    global wroteWorld
    wroteWorld=0
    global lastIncluded
    lastIncluded = ''
    global suites
    suites = []
    global options
    options = []
    #
    try:
        files = parseCommandline(args)
        # The FOG parser is preferred when available and requested.
        if imported_fog and options.fog:
            [options,suites] = cxxtest_fog.scanInputFiles( files, options )
        else:
            [options,suites] = cxxtest_parser.scanInputFiles( files, options )
        writeOutput()
    except SystemExit:
        # catch=True lets embedding callers (e.g. tests) invoke main()
        # without terminating the interpreter.
        if not catch:
            raise
def create_parser(asciidoc=False):
    '''Build the OptionParser for cxxtestgen.

    asciidoc=True swaps quoting in the description so the same text can be
    rendered into the man page.
    '''
    parser = OptionParser("cxxtestgen [options] [<filename> ...]")
    if asciidoc:
        parser.description="The cxxtestgen command processes C++ header files to perform test discovery, and then it creates files for the CxxTest test runner."
    else:
        parser.description="The 'cxxtestgen' command processes C++ header files to perform test discovery, and then it creates files for the 'CxxTest' test runner."
    parser.add_option("--version",
                      action="store_true", dest="version", default=False,
                      help="Write the CxxTest version.")
    parser.add_option("-o", "--output",
                      dest="outputFileName", default=None, metavar="NAME",
                      help="Write output to file NAME.")
    parser.add_option("-w","--world", dest="world", default="cxxtest",
                      help="The label of the tests, used to name the XML results.")
    parser.add_option("", "--include", action="append",
                      dest="headers", default=[], metavar="HEADER",
                      help="Include file HEADER in the test runner before other headers.")
    parser.add_option("", "--abort-on-fail",
                      action="store_true", dest="abortOnFail", default=False,
                      help="Abort tests on failed asserts (like xUnit).")
    parser.add_option("", "--main",
                      action="store", dest="main", default="main",
                      help="Specify an alternative name for the main() function.")
    parser.add_option("", "--headers",
                      action="store", dest="header_filename", default=None,
                      help="Specify a filename that contains a list of header files that are processed to generate a test runner.")
    parser.add_option("", "--runner",
                      dest="runner", default="", metavar="CLASS",
                      help="Create a test runner that processes test events using the class CxxTest::CLASS.")
    parser.add_option("", "--gui",
                      dest="gui", metavar="CLASS",
                      help="Create a GUI test runner that processes test events using the class CxxTest::CLASS. (deprecated)")
    parser.add_option("", "--error-printer",
                      action="store_true", dest="error_printer", default=False,
                      help="Create a test runner using the ErrorPrinter class, and allow the use of the standard library.")
    parser.add_option("", "--xunit-printer",
                      action="store_true", dest="xunit_printer", default=False,
                      help="Create a test runner using the XUnitPrinter class.")
    parser.add_option("", "--xunit-file", dest="xunit_file", default="",
                      help="The file to which the XML summary is written for test runners using the XUnitPrinter class. The default XML filename is TEST-<world>.xml, where <world> is the value of the --world option. (default: cxxtest)")
    parser.add_option("", "--have-std",
                      action="store_true", dest="haveStandardLibrary", default=False,
                      help="Use the standard library (even if not found in tests).")
    parser.add_option("", "--no-std",
                      action="store_true", dest="noStandardLibrary", default=False,
                      help="Do not use standard library (even if found in tests).")
    parser.add_option("", "--have-eh",
                      action="store_true", dest="haveExceptionHandling", default=False,
                      help="Use exception handling (even if not found in tests).")
    parser.add_option("", "--no-eh",
                      action="store_true", dest="noExceptionHandling", default=False,
                      help="Do not use exception handling (even if found in tests).")
    parser.add_option("", "--longlong",
                      dest="longlong", default=None, metavar="TYPE",
                      help="Use TYPE as for long long integers. (default: not supported)")
    parser.add_option("", "--no-static-init",
                      action="store_true", dest="noStaticInit", default=False,
                      help="Do not rely on static initialization in the test runner.")
    parser.add_option("", "--template",
                      dest="templateFileName", default=None, metavar="TEMPLATE",
                      help="Generate the test runner using file TEMPLATE to define a template.")
    parser.add_option("", "--root",
                      action="store_true", dest="root", default=False,
                      help="Write the main() function and global data for a test runner.")
    parser.add_option("", "--part",
                      action="store_true", dest="part", default=False,
                      help="Write the tester classes for a test runner.")
    #parser.add_option("", "--factor",
    #action="store_true", dest="factor", default=False,
    #help="Declare the _CXXTEST_FACTOR macro. (deprecated)")
    # The --fog-parser flag is always present, but its help text reveals
    # whether the optional 'ply'-based FOG parser could actually be imported.
    if imported_fog:
        fog_help = "Use new FOG C++ parser"
    else:
        fog_help = "Use new FOG C++ parser (disabled)"
    parser.add_option("-f", "--fog-parser",
                      action="store_true",
                      dest="fog",
                      default=False,
                      help=fog_help
                      )
    return parser
def parseCommandline(args):
    '''Analyze command line arguments; returns the list of input files and
    fills in the module-level `options`.'''
    global imported_fog
    global options
    parser = create_parser()
    (options, args) = parser.parse_args(args=args)
    # --headers FILE: read additional input header names, one per line.
    if not options.header_filename is None:
        if not os.path.exists(options.header_filename):
            abort( "ERROR: the file '%s' does not exist!" % options.header_filename )
        INPUT = open(options.header_filename)
        headers = [line.strip() for line in INPUT]
        args.extend( headers )
        INPUT.close()
    if options.fog and not imported_fog:
        abort( "Cannot use the FOG parser. Check that the 'ply' package is installed. The 'ordereddict' package is also required if running Python 2.6")
    if options.version:
        printVersion()
    # the cxxtest builder relies on this behaviour! don't remove
    if options.runner == 'none':
        options.runner = None
    # XUnitPrinter implies an XML output file; derive a default name from
    # the --world label when none was given.
    if options.xunit_printer or options.runner == "XUnitPrinter":
        options.xunit_printer=True
        options.runner="XUnitPrinter"
        if len(args) > 1:
            if options.xunit_file == "":
                if options.world == "":
                    options.world = "cxxtest"
                options.xunit_file="TEST-"+options.world+".xml"
        elif options.xunit_file == "":
            if options.world == "":
                options.world = "cxxtest"
            options.xunit_file="TEST-"+options.world+".xml"
    if options.error_printer:
        options.runner= "ErrorPrinter"
        options.haveStandardLibrary = True
    if options.noStaticInit and (options.root or options.part):
        abort( '--no-static-init cannot be used with --root/--part' )
    if options.gui and not options.runner:
        options.runner = 'StdioPrinter'
    files = setFiles(args[1:])
    if len(files) == 0 and not options.root:
        sys.stderr.write(parser.error("No input files found"))
    return files
def printVersion():
    '''Write the CxxTest version to stdout and terminate.'''
    message = "This is CxxTest version %s.\n" % __release__.__version__
    sys.stdout.write(message)
    sys.exit(0)
def setFiles(patterns ):
    '''Resolve the input file patterns given on the command line.'''
    return expandWildcards(patterns)
def expandWildcards( patterns ):
    '''Glob every pattern and return the matching files, slash-normalized.'''
    matches = []
    for pattern in patterns:
        matches.extend(fixBackslashes(name) for name in glob.glob(pattern))
    return matches
def fixBackslashes( fileName ):
    '''Normalize Windows path separators: every backslash becomes a slash.'''
    return fileName.replace('\\', '/')
def writeOutput():
    '''Create the output file, via a user template when one was given.'''
    writer = writeTemplateOutput if options.templateFileName else writeSimpleOutput
    writer()
def writeSimpleOutput():
    '''Create output not based on template'''
    output = startOutputFile()
    writePreamble( output )
    # --part suppresses main()/world data unless --root explicitly asks
    # for them (split compilation of large runners).
    if options.root or not options.part:
        writeMain( output )
        if len(suites) > 0:
            # Flag the runner checks to know whether the first suite's
            # static data has been initialized.
            output.write("bool "+suites[0]['object']+"_init = false;\n")
    writeWorld( output )
    output.close()
# Markers recognized while copying a user-supplied template file:
include_re = re.compile( r"\s*\#\s*include\s+<cxxtest/" )    # first cxxtest include
preamble_re = re.compile( r"^\s*<CxxTest\s+preamble>\s*$" )  # explicit preamble slot
world_re = re.compile( r"^\s*<CxxTest\s+world>\s*$" )        # explicit world slot
def writeTemplateOutput():
    '''Generate the runner by copying the template file, expanding markers.'''
    template = open(options.templateFileName)
    output = startOutputFile()
    for line in template:
        if include_re.search(line):
            # Make sure the preamble precedes the first cxxtest include.
            writePreamble(output)
            output.write(line)
        elif preamble_re.search(line):
            writePreamble(output)
        elif world_re.search(line):
            if len(suites) > 0:
                output.write("bool "+suites[0]['object']+"_init = false;\n")
            writeWorld(output)
        else:
            output.write(line)
    template.close()
    output.close()
def startOutputFile():
    '''Open the destination stream (file or stdout) and emit the header comment.'''
    if options.outputFileName is None:
        output = sys.stdout
    else:
        output = open(options.outputFileName, 'w')
    output.write("/* Generated file, do not edit */\n\n")
    return output
def writePreamble( output ):
    '''Write the CxxTest header (#includes and #defines)'''
    global wrotePreamble
    # Emitted at most once per run, even though several call sites request it.
    if wrotePreamble: return
    output.write( "#ifndef CXXTEST_RUNNING\n" )
    output.write( "#define CXXTEST_RUNNING\n" )
    output.write( "#endif\n" )
    output.write( "\n" )
    if options.xunit_printer:
        # The XUnit printer streams its report through a std::ofstream.
        output.write( "#include <fstream>\n" )
    # Translate command-line capabilities into CxxTest feature macros.
    if options.haveStandardLibrary:
        output.write( "#define _CXXTEST_HAVE_STD\n" )
    if options.haveExceptionHandling:
        output.write( "#define _CXXTEST_HAVE_EH\n" )
    if options.abortOnFail:
        output.write( "#define _CXXTEST_ABORT_TEST_ON_FAIL\n" )
    if options.longlong:
        output.write( "#define _CXXTEST_LONGLONG %s\n" % options.longlong )
    #if options.factor:
        #output.write( "#define _CXXTEST_FACTOR\n" )
    # User-supplied headers come before the cxxtest headers.
    for header in options.headers:
        output.write( "#include \"%s\"\n" % header )
    output.write( "#include <cxxtest/TestListener.h>\n" )
    output.write( "#include <cxxtest/TestTracker.h>\n" )
    output.write( "#include <cxxtest/TestRunner.h>\n" )
    output.write( "#include <cxxtest/RealDescriptions.h>\n" )
    output.write( "#include <cxxtest/TestMain.h>\n" )
    if options.runner:
        output.write( "#include <cxxtest/%s.h>\n" % options.runner )
    if options.gui:
        output.write( "#include <cxxtest/%s.h>\n" % options.gui )
    output.write( "\n" )
    wrotePreamble = 1
def writeMain( output ):
    '''Write the main() function for the test runner'''
    # Without a runner or GUI there is nothing to drive the tests, so no
    # main() is generated at all.
    if not (options.gui or options.runner):
        return
    output.write( 'int %s( int argc, char *argv[] ) {\n' % options.main )
    output.write( ' int status;\n' )
    if options.noStaticInit:
        # Static initialization was suppressed, so call it explicitly.
        output.write( ' CxxTest::initialize();\n' )
    if options.gui:
        # The GUI wraps the text runner so both produce output.
        tester_t = "CxxTest::GuiTuiRunner<CxxTest::%s, CxxTest::%s> " % (options.gui, options.runner)
    else:
        tester_t = "CxxTest::%s" % (options.runner)
    if options.xunit_printer:
        # XUnit output is written to a file rather than stdout.
        output.write( ' std::ofstream ofstr("%s");\n' % options.xunit_file )
        output.write( ' %s tmp(ofstr);\n' % tester_t )
    else:
        output.write( ' %s tmp;\n' % tester_t )
    output.write( ' CxxTest::RealWorldDescription::_worldName = "%s";\n' % options.world )
    output.write( ' status = CxxTest::Main< %s >( tmp, argc, argv );\n' % tester_t )
    output.write( ' return status;\n')
    output.write( '}\n' )
def writeWorld( output ):
    '''Write the world definitions'''
    global wroteWorld
    # Emitted at most once; template processing may request it twice.
    if wroteWorld: return
    writePreamble( output )
    writeSuites( output )
    if options.root or not options.part:
        # Only the root (or a monolithic runner) carries the CxxTest
        # statics and the world description.
        writeRoot( output )
        writeWorldDescr( output )
    if options.noStaticInit:
        writeInitialize( output )
    wroteWorld = 1
def writeSuites(output):
    '''Write all TestDescriptions and SuiteDescriptions'''
    for suite in suites:
        writeInclude( output, suite['file'] )
        if isGenerated(suite):
            generateSuite( output, suite )
        if not options.noStaticInit:
            # With static init the suite object/pointer and its test list
            # are defined here; otherwise CxxTest::initialize() creates
            # them at runtime (see writeInitialize).
            if isDynamic(suite):
                writeSuitePointer( output, suite )
            else:
                writeSuiteObject( output, suite )
            writeTestList( output, suite )
        writeSuiteDescription( output, suite )
        writeTestDescriptions( output, suite )
def isGenerated(suite):
    """True when the suite class itself must be synthesized (CXXTEST_SUITE)."""
    return suite["generated"]
def isDynamic(suite):
    """True when the suite is created/destroyed dynamically (has a 'create' hook)."""
    return "create" in suite
def writeInclude(output, file):
    '''Add #include "file" statement'''
    global lastIncluded
    if options.outputFileName:
        # Prefer a path relative to the generated file's directory so the
        # output tree stays relocatable.
        dirname = os.path.split(options.outputFileName)[0]
        tfile = relpath(file, dirname)
        if os.path.exists(tfile):
            # Consecutive suites from the same header share one #include.
            if tfile == lastIncluded: return
            output.writelines( [ '#include "', tfile, '"\n\n' ] )
            lastIncluded = tfile
            return
    #
    # Use an absolute path if the relative path failed
    #
    tfile = os.path.abspath(file)
    if os.path.exists(tfile):
        if tfile == lastIncluded: return
        output.writelines( [ '#include "', tfile, '"\n\n' ] )
        lastIncluded = tfile
        return
def generateSuite(output, suite):
    '''Emit the class body for a suite declared with CXXTEST_SUITE().'''
    output.write('class %s : public CxxTest::TestSuite {\n' % suite['fullname'])
    output.write('public:\n')
    # The captured body lines already carry their own newlines.
    output.writelines(suite['lines'])
    output.write('};\n\n')
def writeSuitePointer(output, suite):
    '''Create static suite pointer object for dynamic suites.'''
    # Without static init the pointer is zeroed later by
    # CxxTest::initialize() instead of by a static initializer here.
    initializer = '' if options.noStaticInit else ' = 0'
    output.write('static %s* %s%s;\n\n'
                 % (suite['fullname'], suite['object'], initializer))
def writeSuiteObject(output, suite):
    '''Create the static suite object for a non-dynamic suite.'''
    output.write('static %s %s;\n\n' % (suite['fullname'], suite['object']))
def writeTestList(output, suite):
    '''Write the head of the test linked list for a suite.'''
    # With --no-static-init the list is filled in by initialize() at runtime.
    if options.noStaticInit:
        declaration = 'static CxxTest::List %s;\n' % suite['tlist']
    else:
        declaration = 'static CxxTest::List %s = { 0, 0 };\n' % suite['tlist']
    output.write(declaration)
def writeWorldDescr(output):
    '''Write the static definition of the world name.'''
    decl = 'const char* CxxTest::RealWorldDescription::_worldName'
    # With --no-static-init the name is assigned at runtime in main().
    if options.noStaticInit:
        output.write(decl + ';\n')
    else:
        output.write(decl + ' = "cxxtest";\n')
def writeTestDescriptions(output, suite):
    '''Write a description object for every test in the suite.'''
    for case in suite['tests']:
        writeTestDescription(output, suite, case)
def writeTestDescription( output, suite, test ):
    '''Write test description object'''
    # With static init the description is a static object constructed with
    # all of its metadata; with --no-static-init it is a plain class that
    # CxxTest::initialize() instantiates and wires up at runtime.
    if not options.noStaticInit:
        output.write( 'static class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
    else:
        output.write( 'class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
    #
    output.write( 'public:\n' )
    if not options.noStaticInit:
        output.write( ' %s() : CxxTest::RealTestDescription( %s, %s, %s, "%s" ) {}\n' %
                      (test['class'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
    else:
        # Keep a pointer (dynamic) or reference (static) to the suite so
        # the generated runTest() can reach it.
        if isDynamic(suite):
            output.write( ' %s(%s* _%s) : %s(_%s) { }\n' %
                          (test['class'], suite['fullname'], suite['object'], suite['object'], suite['object']) )
            output.write( ' %s* %s;\n' % (suite['fullname'], suite['object']) )
        else:
            output.write( ' %s(%s& _%s) : %s(_%s) { }\n' %
                          (test['class'], suite['fullname'], suite['object'], suite['object'], suite['object']) )
            output.write( ' %s& %s;\n' % (suite['fullname'], suite['object']) )
    output.write( ' void runTest() { %s }\n' % runBody( suite, test ) )
    #
    if not options.noStaticInit:
        output.write( '} %s;\n\n' % test['object'] )
    else:
        output.write( '};\n\n' )
def runBody(suite, test):
    '''Return the C++ statement forming the body of TestDescription::runTest().'''
    return dynamicRun(suite, test) if isDynamic(suite) else staticRun(suite, test)
def dynamicRun(suite, test):
    '''runTest() body for a dynamic suite: guard against a null suite pointer.'''
    return 'if ( %(o)s ) %(o)s->%(n)s();' % {'o': suite['object'], 'n': test['name']}
def staticRun(suite, test):
    '''runTest() body for a non-dynamic suite: call the method on the object.'''
    return '%s.%s();' % (suite['object'], test['name'])
def writeSuiteDescription(output, suite):
    '''Write the SuiteDescription object appropriate for the suite kind.'''
    writer = writeDynamicDescription if isDynamic(suite) else writeStaticDescription
    writer(output, suite)
def writeDynamicDescription( output, suite ):
    '''Write SuiteDescription for a dynamic suite'''
    output.write( 'CxxTest::DynamicSuiteDescription< %s > %s' % (suite['fullname'], suite['dobject']) )
    if not options.noStaticInit:
        # Constructor arguments supply the metadata and the create/destroy
        # hooks; with --no-static-init initialize() supplies them later.
        output.write( '( %s, %s, "%s", %s, %s, %s, %s )' %
                      (suite['cfile'], suite['line'], suite['fullname'], suite['tlist'],
                       suite['object'], suite['create'], suite['destroy']) )
    output.write( ';\n\n' )
def writeStaticDescription( output, suite ):
    '''Write SuiteDescription for a static suite'''
    output.write( 'CxxTest::StaticSuiteDescription %s' % suite['dobject'] )
    if not options.noStaticInit:
        # With --no-static-init the object is default-constructed and
        # initialize() fills in the metadata at runtime.
        output.write( '( %s, %s, "%s", %s, %s )' %
                      (suite['cfile'], suite['line'], suite['fullname'], suite['object'], suite['tlist']) )
    output.write( ';\n\n' )
def writeRoot(output):
    '''Write static members of the CxxTest classes (Root.cpp include).'''
    root_include = '#include <cxxtest/Root.cpp>\n'
    output.write(root_include)
def writeInitialize(output):
    '''Write CxxTest::initialize(), which replaces static initialization'''
    output.write( 'namespace CxxTest {\n' )
    output.write( ' void initialize()\n' )
    output.write( ' {\n' )
    for suite in suites:
        #print "HERE", suite
        # Recreate at runtime everything a static initializer would have
        # built: the test list, the suite object/pointer, its description,
        # and one description object per test.
        writeTestList( output, suite )
        output.write( ' %s.initialize();\n' % suite['tlist'] )
        #writeSuiteObject( output, suite )
        if isDynamic(suite):
            writeSuitePointer( output, suite )
            output.write( ' %s = 0;\n' % suite['object'])
        else:
            writeSuiteObject( output, suite )
        # 'static' keeps the description alive after initialize() returns.
        output.write( ' static ')
        writeSuiteDescription( output, suite )
        if isDynamic(suite):
            #output.write( ' %s = %s.suite();\n' % (suite['object'],suite['dobject']) )
            output.write( ' %s.initialize( %s, %s, "%s", %s, %s, %s, %s );\n' %
                          (suite['dobject'], suite['cfile'], suite['line'], suite['fullname'],
                           suite['tlist'], suite['object'], suite['create'], suite['destroy']) )
            output.write( ' %s.setUp();\n' % suite['dobject'])
        else:
            output.write( ' %s.initialize( %s, %s, "%s", %s, %s );\n' %
                          (suite['dobject'], suite['cfile'], suite['line'], suite['fullname'],
                           suite['object'], suite['tlist']) )
        for test in suite['tests']:
            output.write( ' static %s %s(%s);\n' %
                          (test['class'], test['object'], suite['object']) )
            output.write( ' %s.initialize( %s, %s, %s, "%s" );\n' %
                          (test['object'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
    output.write( ' }\n' )
    output.write( '}\n' )
man_template=Template("""CXXTESTGEN(1)
=============
:doctype: manpage
NAME
----
cxxtestgen - performs test discovery to create a CxxTest test runner
SYNOPSIS
--------
${usage}
DESCRIPTION
-----------
${description}
OPTIONS
-------
${options}
EXIT STATUS
-----------
*0*::
Success
*1*::
Failure (syntax or usage error; configuration error; document
processing failure; unexpected error).
BUGS
----
See the CxxTest Home Page for the link to the CxxTest ticket repository.
AUTHOR
------
CxxTest was originally written by Erez Volk. Many people have
contributed to it.
RESOURCES
---------
Home page: <http://cxxtest.com/>
CxxTest User Guide: <http://cxxtest.com/cxxtest/doc/guide.html>
COPYING
-------
Copyright (c) 2008 Sandia Corporation. This software is distributed
under the Lesser GNU General Public License (LGPL) v3
""")
def create_manpage():
    """Generate the asciidoc man page file 'cxxtestgen.1.txt'.

    The option list is rendered from the live option parser so the man page
    always matches the actual command-line interface.

    Fixes: the output file is written inside a 'with' block so the handle is
    closed even if substitution fails, and the metavar check uses the
    idiomatic 'is not None' (was 'not ... is None').
    """
    parser = create_parser(asciidoc=True)
    usage = parser.usage
    description = parser.description
    options = ""
    for opt in parser.option_list:
        # Render each option as "*-s, --long*='METAVAR'::" followed by its
        # help text -- asciidoc labeled-list syntax.
        opts = opt._short_opts + opt._long_opts
        optstr = '*' + ', '.join(opts) + '*'
        if opt.metavar is not None:
            optstr += "='%s'" % opt.metavar
        optstr += '::\n'
        options += optstr
        options += opt.help
        options += '\n\n'
    with open('cxxtestgen.1.txt', 'w') as OUTPUT:
        OUTPUT.write(man_template.substitute(usage=usage,
                                             description=description,
                                             options=options))
|
upliftaero/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/random/setup.py | 54 | from os.path import join
from numpy.distutils.system_info import get_info
def testcode_wincrypt():
    # Minimal C program whose exit status reports whether _WIN32 is
    # defined: exits 0 on Windows builds, 1 elsewhere.  configuration()
    # below try_run()s it to decide whether to link Advapi32.
    return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
def configuration(parent_package='',top_path=None):
    # numpy.distutils configuration hook: describes how to build the
    # numpy.random subpackage (the mtrand extension and its data files).
    from numpy.distutils.misc_util import Configuration, get_mathlibs
    config = Configuration('random',parent_package,top_path)
    def generate_libraries(ext, build_dir):
        # Build-time callback: chooses link libraries once the compiler is
        # known.  On Windows (the test program exits successfully) the
        # Advapi32 library is added for the crypto API used by randomkit.
        config_cmd = config.get_config_cmd()
        libs = get_mathlibs()
        tc = testcode_wincrypt()
        if config_cmd.try_run(tc):
            libs.append('Advapi32')
        ext.libraries.extend(libs)
        return None
    # Configure mtrand
    config.add_extension('mtrand',
                         sources=[join('mtrand', x) for x in
                                  ['mtrand.c', 'randomkit.c', 'initarray.c',
                                   'distributions.c']
                                  ] + [generate_libraries],
                         depends = [join('mtrand','*.h'),
                                    join('mtrand','*.pyx'),
                                    join('mtrand','*.pxi'),
                                    ],
                         **get_info('ndarray'))
    config.add_data_files(('.', join('mtrand', 'randomkit.h')))
    config.add_data_dir('tests')
    return config
# Allow building this subpackage standalone.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
|
ssorgatem/qiime | refs/heads/master | qiime/pycogent_backports/__init__.py | 15 | #!/usr/bin/env python
__author__ = "The QIIME Development Team"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight",
"Greg Caporaso",
"Jeremy Widmann",
"Kyle Bittinger",
"Justin Kuczynski",
"William Walters",
"Jesse Zaneveld",
"Dan Knights",
"Jesse Stombaugh",
"Micah Hamady",
"Julia Goodrich",
"Meg Pirrung",
"Jens Reeder",
"Daniel McDonald",
"Catherine Lozupone"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
|
yamahata/neutron | refs/heads/master | neutron/agent/metadata/agent.py | 5 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import hashlib
import hmac
import os
import socket
import eventlet
import httplib2
from neutronclient.v2_0 import client
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from neutron.agent.common import config as agent_conf
from neutron.agent import rpc as agent_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import service
from neutron import wsgi
LOG = logging.getLogger(__name__)
class MetadataProxyHandler(object):
    # WSGI application: receives metadata requests relayed over the agent's
    # UNIX domain socket, resolves the requesting instance via the Neutron
    # API, and forwards the request to the Nova metadata service with an
    # HMAC signature attesting the instance id.
    OPTS = [
        cfg.StrOpt('admin_user',
                   help=_("Admin user")),
        cfg.StrOpt('admin_password',
                   help=_("Admin password"),
                   secret=True),
        cfg.StrOpt('admin_tenant_name',
                   help=_("Admin tenant name")),
        cfg.StrOpt('auth_url',
                   help=_("Authentication URL")),
        cfg.StrOpt('auth_strategy', default='keystone',
                   help=_("The type of authentication to use")),
        cfg.StrOpt('auth_region',
                   help=_("Authentication region")),
        cfg.BoolOpt('auth_insecure',
                    default=False,
                    help=_("Turn off verification of the certificate for"
                           " ssl")),
        cfg.StrOpt('auth_ca_cert',
                   default=None,
                   help=_("Certificate Authority public key (CA cert) "
                          "file for ssl")),
        cfg.StrOpt('endpoint_type',
                   default='adminURL',
                   help=_("Network service endpoint type to pull from "
                          "the keystone catalog")),
        cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
                   help=_("IP address used by Nova metadata server.")),
        cfg.IntOpt('nova_metadata_port',
                   default=8775,
                   help=_("TCP Port used by Nova metadata server.")),
        cfg.StrOpt('metadata_proxy_shared_secret',
                   default='',
                   help=_('Shared secret to sign instance-id request'),
                   secret=True)
    ]
    def __init__(self, conf):
        self.conf = conf
        # Token/endpoint cache from the last neutron client call, reused to
        # avoid re-authenticating on every request.
        self.auth_info = {}
    def _get_neutron_client(self):
        # Build a neutron client, passing any cached token/endpoint so a
        # still-valid token is reused.
        qclient = client.Client(
            username=self.conf.admin_user,
            password=self.conf.admin_password,
            tenant_name=self.conf.admin_tenant_name,
            auth_url=self.conf.auth_url,
            auth_strategy=self.conf.auth_strategy,
            region_name=self.conf.auth_region,
            token=self.auth_info.get('auth_token'),
            insecure=self.conf.auth_insecure,
            ca_cert=self.conf.auth_ca_cert,
            endpoint_url=self.auth_info.get('endpoint_url'),
            endpoint_type=self.conf.endpoint_type
        )
        return qclient
    @webob.dec.wsgify(RequestClass=webob.Request)
    def __call__(self, req):
        # WSGI entry point: identify the caller, then proxy or 404.
        try:
            LOG.debug(_("Request: %s"), req)
            instance_id, tenant_id = self._get_instance_and_tenant_id(req)
            if instance_id:
                return self._proxy_request(instance_id, tenant_id, req)
            else:
                return webob.exc.HTTPNotFound()
        except Exception:
            LOG.exception(_("Unexpected error."))
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
    def _get_instance_and_tenant_id(self, req):
        # Map the requester's IP (plus the network or router the request
        # came through) to a Neutron port, yielding device and tenant ids.
        qclient = self._get_neutron_client()
        remote_address = req.headers.get('X-Forwarded-For')
        network_id = req.headers.get('X-Neutron-Network-ID')
        router_id = req.headers.get('X-Neutron-Router-ID')
        if network_id:
            networks = [network_id]
        else:
            # No network given: consider every network attached to the
            # router's internal interfaces.
            internal_ports = qclient.list_ports(
                device_id=router_id,
                device_owner=n_const.DEVICE_OWNER_ROUTER_INTF)['ports']
            networks = [p['network_id'] for p in internal_ports]
        ports = qclient.list_ports(
            network_id=networks,
            fixed_ips=['ip_address=%s' % remote_address])['ports']
        self.auth_info = qclient.get_auth_info()
        # Only an unambiguous single match identifies an instance.
        if len(ports) == 1:
            return ports[0]['device_id'], ports[0]['tenant_id']
        return None, None
    def _proxy_request(self, instance_id, tenant_id, req):
        # Forward the request to Nova, translating its status codes back
        # into webob responses.
        headers = {
            'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
            'X-Instance-ID': instance_id,
            'X-Tenant-ID': tenant_id,
            'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
        }
        url = urlparse.urlunsplit((
            'http',
            '%s:%s' % (self.conf.nova_metadata_ip,
                       self.conf.nova_metadata_port),
            req.path_info,
            req.query_string,
            ''))
        h = httplib2.Http()
        resp, content = h.request(url, method=req.method, headers=headers,
                                  body=req.body)
        if resp.status == 200:
            LOG.debug(str(resp))
            req.response.content_type = resp['content-type']
            req.response.body = content
            return req.response
        elif resp.status == 403:
            msg = _(
                'The remote metadata server responded with Forbidden. This '
                'response usually occurs when shared secrets do not match.'
            )
            LOG.warn(msg)
            return webob.exc.HTTPForbidden()
        elif resp.status == 404:
            return webob.exc.HTTPNotFound()
        elif resp.status == 409:
            return webob.exc.HTTPConflict()
        elif resp.status == 500:
            msg = _(
                'Remote metadata server experienced an internal server error.'
            )
            LOG.warn(msg)
            return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
        else:
            raise Exception(_('Unexpected response code: %s') % resp.status)
    def _sign_instance_id(self, instance_id):
        # HMAC-SHA256 of the instance id with the shared secret; Nova
        # recomputes this to verify the proxy vouches for the instance.
        return hmac.new(self.conf.metadata_proxy_shared_secret,
                        instance_id,
                        hashlib.sha256).hexdigest()
class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol):
    # HttpProtocol variant for UNIX domain sockets, where the peer address
    # is '' rather than an (ip, port) tuple.
    def __init__(self, request, client_address, server):
        # Substitute a placeholder pair so downstream code that expects
        # (host, port) keeps working.
        if client_address == '':
            client_address = ('<local>', 0)
        # base class is old-style, so super does not work properly
        eventlet.wsgi.HttpProtocol.__init__(self, request, client_address,
                                            server)
class WorkerService(wsgi.WorkerService):
    # Per-process worker: serves the WSGI app on the listening socket that
    # the parent UnixDomainWSGIServer already opened.
    def start(self):
        self._server = self._service.pool.spawn(self._service._run,
                                                self._application,
                                                self._service._socket)
class UnixDomainWSGIServer(wsgi.Server):
    # WSGI server bound to a UNIX domain socket; runs either in-process
    # (workers < 1) or as a set of forked worker processes.
    def __init__(self, name):
        self._socket = None
        self._launcher = None
        self._server = None
        super(UnixDomainWSGIServer, self).__init__(name)
    def start(self, application, file_socket, workers, backlog):
        self._socket = eventlet.listen(file_socket,
                                       family=socket.AF_UNIX,
                                       backlog=backlog)
        if workers < 1:
            # For the case where only one process is required.
            self._server = self.pool.spawn_n(self._run, application,
                                             self._socket)
        else:
            # Minimize the cost of checking for child exit by extending the
            # wait interval past the default of 0.01s.
            self._launcher = service.ProcessLauncher(wait_interval=1.0)
            self._server = WorkerService(self, application)
            self._launcher.launch_service(self._server, workers=workers)
    def _run(self, application, socket):
        """Start a WSGI service in a new green thread."""
        logger = logging.getLogger('eventlet.wsgi.server')
        eventlet.wsgi.server(socket,
                             application,
                             custom_pool=self.pool,
                             protocol=UnixDomainHttpProtocol,
                             log=logging.WritableLogger(logger))
class UnixDomainMetadataProxy(object):
    # Agent wrapper: owns the UNIX domain socket, reports agent state to
    # the Neutron server, and runs the proxy handler as a WSGI service.
    OPTS = [
        cfg.StrOpt('metadata_proxy_socket',
                   default='$state_path/metadata_proxy',
                   help=_('Location for Metadata Proxy UNIX domain socket')),
        cfg.IntOpt('metadata_workers',
                   default=0,
                   help=_('Number of separate worker processes for metadata '
                          'server')),
        cfg.IntOpt('metadata_backlog',
                   default=128,
                   help=_('Number of backlog requests to configure the '
                          'metadata server socket with'))
    ]
    def __init__(self, conf):
        self.conf = conf
        dirname = os.path.dirname(cfg.CONF.metadata_proxy_socket)
        if os.path.isdir(dirname):
            # Remove a stale socket left by a previous run; tolerate a
            # concurrent removal (unlink fails but the path is gone).
            try:
                os.unlink(cfg.CONF.metadata_proxy_socket)
            except OSError:
                with excutils.save_and_reraise_exception() as ctxt:
                    if not os.path.exists(cfg.CONF.metadata_proxy_socket):
                        ctxt.reraise = False
        else:
            os.makedirs(dirname, 0o755)
        self._init_state_reporting()
    def _init_state_reporting(self):
        # Periodically report this agent's liveness/configuration to the
        # Neutron server over RPC.
        self.context = context.get_admin_context_without_session()
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.agent_state = {
            'binary': 'neutron-metadata-agent',
            'host': cfg.CONF.host,
            'topic': 'N/A',
            'configurations': {
                'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
                'nova_metadata_ip': cfg.CONF.nova_metadata_ip,
                'nova_metadata_port': cfg.CONF.nova_metadata_port,
            },
            'start_flag': True,
            'agent_type': n_const.AGENT_TYPE_METADATA}
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)
    def _report_state(self):
        try:
            self.state_rpc.report_state(
                self.context,
                self.agent_state,
                use_call=self.agent_state.get('start_flag'))
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_('Neutron server does not support state report.'
                       ' State report for this agent will be disabled.'))
            self.heartbeat.stop()
            return
        except Exception:
            LOG.exception(_("Failed reporting state!"))
            return
        # First successful report delivered; later reports are casts.
        self.agent_state.pop('start_flag', None)
    def run(self):
        # Serve the proxy handler on the UNIX socket until shutdown.
        server = UnixDomainWSGIServer('neutron-metadata-agent')
        server.start(MetadataProxyHandler(self.conf),
                     self.conf.metadata_proxy_socket,
                     workers=self.conf.metadata_workers,
                     backlog=self.conf.metadata_backlog)
        server.wait()
def main():
    # Entry point: monkey-patch the stdlib for eventlet, register and load
    # configuration, then run the metadata proxy until it exits.
    eventlet.monkey_patch()
    cfg.CONF.register_opts(UnixDomainMetadataProxy.OPTS)
    cfg.CONF.register_opts(MetadataProxyHandler.OPTS)
    agent_conf.register_agent_state_opts_helper(cfg.CONF)
    cfg.CONF(project='neutron')
    config.setup_logging(cfg.CONF)
    utils.log_opt_values(LOG)
    proxy = UnixDomainMetadataProxy(cfg.CONF)
    proxy.run()
|
kohout/djangocms-getaweb-calendar | refs/heads/master | setup.py | 1 | import os
from setuptools import setup
# The PyPI long description is taken verbatim from the README.
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
    name='djangocms-getaweb-calendar',
    version='0.1',
    packages=['djangocms_calendar'],
    include_package_data=True,
    license='Unlicense', # example license
    description='A calendar app for Django CMS 3.0',
    long_description=README,
    url='https://github.com/kohout/djangocms-getaweb-calendar/',
    author='Christian Kohout',
    author_email='ck@getaweb.at',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
|
chjw8016/GreenOdoo7-haibao | refs/heads/master | openerp/addons/base/res/wizard/__init__.py | 63 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import change_password_wizard |
pgmillon/ansible | refs/heads/devel | test/units/modules/storage/netapp/test_na_ontap_quotas.py | 38 | ''' unit tests ONTAP Ansible module: na_ontap_quotas '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_quotas \
import NetAppONTAPQuotas as my_module
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """prepare arguments so that they will be picked up during module creation"""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json so tests can catch the result."""
class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json so tests can catch the failure."""
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for exit_json; packages the return data into an exception."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for fail_json; packages the return data into an exception."""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''
    def __init__(self, kind=None):
        ''' save arguments '''
        # kind selects the canned behavior: 'quotas' returns a quota
        # record, 'quota_fail' raises NaApiError, anything else echoes the
        # request back.
        self.type = kind
        self.xml_in = None
        self.xml_out = None
    def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        self.xml_in = xml
        if self.type == 'quotas':
            xml = self.build_quota_info()
        elif self.type == 'quota_fail':
            raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
        self.xml_out = xml
        return xml
    @staticmethod
    def build_quota_info():
        ''' build xml data for quota-entry '''
        # One quota entry on volume 'ansible' with all limits unset ('-')
        # and quota status reported as on.
        xml = netapp_utils.zapi.NaElement('xml')
        data = {'num-records': 1,
                'attributes-list': {'quota-entry': {'volume': 'ansible',
                                                    'file-limit': '-', 'disk-limit': '-', 'threshold': '-'}},
                'status': 'true'}
        xml.translate_struct(data)
        return xml
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''
    def setUp(self):
        # Route module exit/fail through catchable exceptions instead of
        # sys.exit, for every test in this class.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection()
        # onbox=True points the tests at a live filer instead of the mock.
        self.onbox = False
    def set_default_args(self):
        if self.onbox:
            hostname = '10.193.75.3'
            username = 'admin'
            password = 'netapp1!'
            volume = 'ansible'
            vserver = 'ansible'
            policy = 'ansible'
            quota_target = '/vol/ansible'
            type = 'user'
        else:
            hostname = 'hostname'
            username = 'username'
            password = 'password'
            volume = 'ansible'
            vserver = 'ansible'
            policy = 'ansible'
            quota_target = '/vol/ansible'
            type = 'user'
        return dict({
            'hostname': hostname,
            'username': username,
            'password': password,
            'volume': volume,
            'vserver': vserver,
            'policy': policy,
            'quota_target': quota_target,
            'type': type
        })
    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            my_module()
        print('Info: %s' % exc.value.args[0]['msg'])
    def test_ensure_get_called(self):
        ''' test get_quota for non-existent quota'''
        set_module_args(self.set_default_args())
        my_obj = my_module()
        my_obj.server = self.server
        assert my_obj.get_quotas is not None
    def test_ensure_get_called_existing(self):
        ''' test get_quota for existing quota'''
        set_module_args(self.set_default_args())
        my_obj = my_module()
        my_obj.server = MockONTAPConnection(kind='quotas')
        assert my_obj.get_quotas()
    @patch('ansible.modules.storage.netapp.na_ontap_quotas.NetAppONTAPQuotas.quota_entry_set')
    def test_successful_create(self, quota_entry_set):
        ''' creating quota and testing idempotency '''
        data = self.set_default_args()
        data.update({'file_limit': '3',
                     'disk_limit': '4'})
        # data['file_limit'] = '3'
        # data['disk_limit'] = '4'
        # data['threshold'] = '4'
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = self.server
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        quota_entry_set.assert_called_with()
        # to reset na_helper from remembering the previous 'changed' value
        set_module_args(self.set_default_args())
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('quotas')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']
    @patch('ansible.modules.storage.netapp.na_ontap_quotas.NetAppONTAPQuotas.quota_entry_delete')
    def test_successful_delete(self, quota_entry_delete):
        ''' deleting quota and testing idempotency '''
        data = self.set_default_args()
        data['state'] = 'absent'
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            # 'quotas' mock makes the entry exist, so delete must fire.
            my_obj.server = MockONTAPConnection('quotas')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        quota_entry_delete.assert_called_with()
        # to reset na_helper from remembering the previous 'changed' value
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = self.server
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']
    def test_successful_modify(self):
        ''' modifying quota and testing idempotency '''
        data = self.set_default_args()
        data['file_limit'] = '3'
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('quotas')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
    def test_quota_on_off(self):
        ''' quota set on or off '''
        data = self.set_default_args()
        data['set_quota_status'] = 'false'
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('quotas')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']
    def test_if_all_methods_catch_exception(self):
        # 'quota_fail' mock raises NaApiError from every ZAPI call; each
        # module method must convert that into a fail_json message.
        module_args = {}
        module_args.update(self.set_default_args())
        set_module_args(module_args)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('quota_fail')
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.get_quota_status()
        assert 'Error fetching quotas status info' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.get_quotas()
        assert 'Error fetching quotas info' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.quota_entry_set()
        assert 'Error adding/modifying quota entry' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.quota_entry_delete()
        assert 'Error deleting quota entry' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.quota_entry_modify(module_args)
        assert 'Error modifying quota entry' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.on_or_off_quota('quota-on')
        assert 'Error setting quota-on for ansible' in exc.value.args[0]['msg']
|
hofschroeer/shinysdr | refs/heads/master | shinysdr/test/broken_deps/imports.py | 1 | # pylint: disable=import-error, no-name-in-module, unused-import
import shinysdr.test.nonexistent_module_in_dep
|
varunarya10/python-ironicclient | refs/heads/master | ironicclient/common/http.py | 2 | # -*- coding: utf-8 -*-
#
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import logging
import os
import socket
import ssl
from keystoneclient import adapter
import six
import six.moves.urllib.parse as urlparse
from ironicclient import exc
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-ironicclient'
CHUNKSIZE = 1024 * 64 # 64kB
API_VERSION = '/v1'
def _trim_endpoint_api_version(url):
    """Trim a trailing slash and a trailing API version suffix from *url*.

    BUG FIX: the previous implementation used ``rstrip(API_VERSION)``,
    which strips the *characters* '/', 'v' and '1' from the end of the
    string rather than the literal suffix '/v1'.  That mangles endpoints
    whose host or path merely ends in one of those characters (e.g.
    ``http://srv1`` would become ``http://sr``).  Remove the suffix
    explicitly instead.
    """
    url = url.rstrip('/')
    if url.endswith(API_VERSION):
        url = url[:-len(API_VERSION)]
    return url
def _extract_error_json(body):
"""Return error_message from the HTTP response body."""
error_json = {}
try:
body_json = json.loads(body)
if 'error_message' in body_json:
raw_msg = body_json['error_message']
error_json = json.loads(raw_msg)
except ValueError:
pass
return error_json
class HTTPClient(object):
    """Plain httplib-based client used when no keystone session is supplied.

    Talks to the endpoint directly over http or verified https, adding an
    X-Auth-Token header when a token was given at construction time.
    """

    def __init__(self, endpoint, **kwargs):
        # endpoint: full API endpoint URL, possibly including the /v1 suffix.
        self.endpoint = endpoint
        self.endpoint_trimmed = _trim_endpoint_api_version(endpoint)
        self.auth_token = kwargs.get('token')
        self.auth_ref = kwargs.get('auth_ref')
        self.connection_params = self.get_connection_params(endpoint, **kwargs)

    @staticmethod
    def get_connection_params(endpoint, **kwargs):
        """Return (connection_class, ctor_args, ctor_kwargs) for *endpoint*.

        Raises exc.EndpointException for schemes other than http/https.
        """
        parts = urlparse.urlparse(endpoint)
        path = _trim_endpoint_api_version(parts.path)
        _args = (parts.hostname, parts.port, path)
        # Default request timeout is 600 seconds unless overridden.
        _kwargs = {'timeout': (float(kwargs.get('timeout'))
                               if kwargs.get('timeout') else 600)}
        if parts.scheme == 'https':
            _class = VerifiedHTTPSConnection
            _kwargs['ca_file'] = kwargs.get('ca_file', None)
            _kwargs['cert_file'] = kwargs.get('cert_file', None)
            _kwargs['key_file'] = kwargs.get('key_file', None)
            _kwargs['insecure'] = kwargs.get('insecure', False)
        elif parts.scheme == 'http':
            _class = six.moves.http_client.HTTPConnection
        else:
            msg = 'Unsupported scheme: %s' % parts.scheme
            raise exc.EndpointException(msg)
        return (_class, _args, _kwargs)

    def get_connection(self):
        """Instantiate a connection from the stored params (host, port only)."""
        _class = self.connection_params[0]
        try:
            return _class(*self.connection_params[1][0:2],
                          **self.connection_params[2])
        except six.moves.http_client.InvalidURL:
            raise exc.EndpointException()

    def log_curl_request(self, method, url, kwargs):
        """Log the outgoing request as an equivalent curl command (debug aid)."""
        curl = ['curl -i -X %s' % method]
        for (key, value) in kwargs['headers'].items():
            header = '-H \'%s: %s\'' % (key, value)
            curl.append(header)
        conn_params_fmt = [
            ('key_file', '--key %s'),
            ('cert_file', '--cert %s'),
            ('ca_file', '--cacert %s'),
        ]
        for (key, fmt) in conn_params_fmt:
            value = self.connection_params[2].get(key)
            if value:
                curl.append(fmt % value)
        if self.connection_params[2].get('insecure'):
            curl.append('-k')
        if 'body' in kwargs:
            curl.append('-d \'%s\'' % kwargs['body'])
        curl.append(urlparse.urljoin(self.endpoint_trimmed, url))
        LOG.debug(' '.join(curl))

    @staticmethod
    def log_http_response(resp, body=None):
        """Log status line, headers and (optionally) the body at debug level."""
        status = (resp.version / 10.0, resp.status, resp.reason)
        dump = ['\nHTTP/%.1f %s %s' % status]
        dump.extend(['%s: %s' % (k, v) for k, v in resp.getheaders()])
        dump.append('')
        if body:
            dump.extend([body, ''])
        LOG.debug('\n'.join(dump))

    def _make_connection_url(self, url):
        # Join the endpoint's base path with the request path.
        (_class, _args, _kwargs) = self.connection_params
        base_url = _args[2]
        return '%s/%s' % (base_url, url.lstrip('/'))

    def _http_request(self, url, method, **kwargs):
        """Send an http request with the specified characteristics.

        Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
        as setting headers and error handling.
        """
        # Copy the kwargs so we can reuse the original in case of redirects
        kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
        kwargs['headers'].setdefault('User-Agent', USER_AGENT)
        if self.auth_token:
            kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
        self.log_curl_request(method, url, kwargs)
        conn = self.get_connection()
        try:
            conn_url = self._make_connection_url(url)
            conn.request(method, conn_url, **kwargs)
            resp = conn.getresponse()
        except socket.gaierror as e:
            message = ("Error finding address for %(url)s: %(e)s"
                       % dict(url=url, e=e))
            raise exc.EndpointNotFound(message)
        except (socket.error, socket.timeout) as e:
            endpoint = self.endpoint
            message = ("Error communicating with %(endpoint)s %(e)s"
                       % dict(endpoint=endpoint, e=e))
            raise exc.ConnectionRefused(message)
        body_iter = ResponseBodyIterator(resp)
        # Read body into string if it isn't obviously image data
        body_str = None
        if resp.getheader('content-type', None) != 'application/octet-stream':
            body_str = ''.join([chunk for chunk in body_iter])
            self.log_http_response(resp, body_str)
            body_iter = six.StringIO(body_str)
        else:
            self.log_http_response(resp)
        if 400 <= resp.status < 600:
            LOG.warn("Request returned failure status.")
            error_json = _extract_error_json(body_str)
            raise exc.from_response(
                resp, error_json.get('faultstring'),
                error_json.get('debuginfo'), method, url)
        elif resp.status in (301, 302, 305):
            # Redirected. Reissue the request to the new location.
            # BUG FIX: httplib responses are not subscriptable; the Location
            # header must be read with getheader(), not resp['location'].
            return self._http_request(resp.getheader('location'), method,
                                      **kwargs)
        elif resp.status == 300:
            raise exc.from_response(resp, method=method, url=url)
        return resp, body_iter

    def json_request(self, method, url, **kwargs):
        """Issue a JSON request and decode the JSON response body, if any."""
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type', 'application/json')
        kwargs['headers'].setdefault('Accept', 'application/json')
        if 'body' in kwargs:
            kwargs['body'] = json.dumps(kwargs['body'])
        resp, body_iter = self._http_request(url, method, **kwargs)
        content_type = resp.getheader('content-type', None)
        if resp.status == 204 or resp.status == 205 or content_type is None:
            # No-content responses carry no decodable body.
            return resp, list()
        if 'application/json' in content_type:
            body = ''.join([chunk for chunk in body_iter])
            try:
                body = json.loads(body)
            except ValueError:
                LOG.error('Could not decode response body as JSON')
        else:
            body = None
        return resp, body

    def raw_request(self, method, url, **kwargs):
        """Issue a request with an octet-stream body, returning the raw pair."""
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type',
                                     'application/octet-stream')
        return self._http_request(url, method, **kwargs)
class VerifiedHTTPSConnection(six.moves.http_client.HTTPSConnection):
    """httplib-compatible connection using client-side SSL authentication.

    :see http://code.activestate.com/recipes/
            577548-https-httplib-client-connection-with-certificate-v/
    """

    def __init__(self, host, port, key_file=None, cert_file=None,
                 ca_file=None, timeout=None, insecure=False):
        six.moves.http_client.HTTPSConnection.__init__(self, host, port,
                                                       key_file=key_file,
                                                       cert_file=cert_file)
        self.key_file = key_file
        self.cert_file = cert_file
        # Fall back to a well-known system CA bundle when none is given.
        if ca_file is not None:
            self.ca_file = ca_file
        else:
            self.ca_file = self.get_system_ca_file()
        self.timeout = timeout
        # insecure=True disables server certificate verification entirely.
        self.insecure = insecure

    def connect(self):
        """Connect to a host on a given (SSL) port.

        If ca_file is pointing somewhere, use it to check Server Certificate.

        Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
        This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to
        ssl.wrap_socket(), which forces SSL to check server certificate against
        our client certificate.
        """
        sock = socket.create_connection((self.host, self.port), self.timeout)
        # If we are tunnelling through a proxy, set up the tunnel on the
        # plain socket before wrapping it in TLS.
        if self._tunnel_host:
            self.sock = sock
            self._tunnel()
        if self.insecure is True:
            kwargs = {'cert_reqs': ssl.CERT_NONE}
        else:
            kwargs = {'cert_reqs': ssl.CERT_REQUIRED, 'ca_certs': self.ca_file}
        if self.cert_file:
            kwargs['certfile'] = self.cert_file
        if self.key_file:
            kwargs['keyfile'] = self.key_file
        # NOTE(review): ssl.wrap_socket is deprecated/removed in modern
        # Python; this code presumably targets Python 2 — confirm before
        # porting to ssl.SSLContext.wrap_socket.
        self.sock = ssl.wrap_socket(sock, **kwargs)

    @staticmethod
    def get_system_ca_file():
        """Return path to system default CA file, or None if none is found."""
        # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
        # Suse, FreeBSD/OpenBSD
        ca_path = ['/etc/ssl/certs/ca-certificates.crt',
                   '/etc/pki/tls/certs/ca-bundle.crt',
                   '/etc/ssl/ca-bundle.pem',
                   '/etc/ssl/cert.pem']
        for ca in ca_path:
            if os.path.exists(ca):
                return ca
        return None
class SessionClient(adapter.LegacyJsonAdapter):
    """HTTP client based on Keystone client session."""

    def _http_request(self, url, method, **kwargs):
        # Fill in defaults the session layer needs; callers may override any.
        kwargs.setdefault('user_agent', USER_AGENT)
        kwargs.setdefault('auth', self.auth)
        endpoint_filter = kwargs.setdefault('endpoint_filter', {})
        endpoint_filter.setdefault('interface', self.interface)
        endpoint_filter.setdefault('service_type', self.service_type)
        endpoint_filter.setdefault('region_name', self.region_name)
        # raise_exc=False: we translate error statuses ourselves below.
        resp = self.session.request(url, method,
                                    raise_exc=False, **kwargs)
        if 400 <= resp.status_code < 600:
            error_json = _extract_error_json(resp.content)
            raise exc.from_response(resp, error_json.get('faultstring'),
                                    error_json.get('debuginfo'), method, url)
        elif resp.status_code in (301, 302, 305):
            # Redirected. Reissue the request to the new location.
            location = resp.headers.get('location')
            resp = self._http_request(location, method, **kwargs)
        elif resp.status_code == 300:
            # Multiple Choices is treated as an error (e.g. version mismatch).
            raise exc.from_response(resp, method=method, url=url)
        return resp

    def json_request(self, method, url, **kwargs):
        """Issue a JSON request; return (response, decoded body or None)."""
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type', 'application/json')
        kwargs['headers'].setdefault('Accept', 'application/json')
        if 'body' in kwargs:
            # requests-style sessions take the payload as 'data', not 'body'.
            kwargs['data'] = json.dumps(kwargs.pop('body'))
        resp = self._http_request(url, method, **kwargs)
        body = resp.content
        content_type = resp.headers.get('content-type', None)
        status = resp.status_code
        if status == 204 or status == 205 or content_type is None:
            # No-content responses carry no decodable body.
            return resp, list()
        if 'application/json' in content_type:
            try:
                body = resp.json()
            except ValueError:
                LOG.error('Could not decode response body as JSON')
        else:
            body = None
        return resp, body

    def raw_request(self, method, url, **kwargs):
        """Issue a request with an octet-stream content type."""
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type',
                                     'application/octet-stream')
        return self._http_request(url, method, **kwargs)
class ResponseBodyIterator(object):
    """Iterate over an HTTP response body in fixed-size chunks.

    Improvements over the original:
    - ``__next__`` alias so the iterator also works under Python 3.
    - ``__iter__`` returns self instead of wrapping ``next()`` in a
      generator, which under PEP 479 would turn the terminating
      StopIteration into a RuntimeError on Python 3.7+.
    - optional ``chunk_size`` parameter (defaults to the module-level
      CHUNKSIZE, preserving existing behaviour).
    """

    def __init__(self, resp, chunk_size=None):
        # resp: any object exposing a file-like read(size) method.
        self.resp = resp
        self.chunk_size = chunk_size if chunk_size is not None else CHUNKSIZE

    def __iter__(self):
        return self

    def next(self):
        chunk = self.resp.read(self.chunk_size)
        if chunk:
            return chunk
        raise StopIteration()

    # Python 3 iterator protocol.
    __next__ = next
def _construct_http_client(*args, **kwargs):
    """Build a SessionClient when a keystone session is supplied,
    otherwise fall back to a plain HTTPClient."""
    session = kwargs.pop('session', None)
    auth = kwargs.pop('auth', None)
    if not session:
        return HTTPClient(*args, **kwargs)
    return SessionClient(session=session,
                         auth=auth,
                         interface=kwargs.pop('endpoint_type', None),
                         service_type=kwargs.pop('service_type', 'baremetal'),
                         region_name=kwargs.pop('region_name', None),
                         service_name=None,
                         user_agent='python-ironicclient')
|
gylian/sickrage | refs/heads/master | lib/rtorrent/lib/xmlrpc/basic_auth.py | 95 | #
# Copyright (c) 2013 Dean Gardiner, <gardiner91@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from base64 import encodestring
import string
import xmlrpclib
class BasicAuthTransport(xmlrpclib.Transport):
    """xmlrpclib transport that adds an HTTP Basic-Auth header to requests.

    NOTE(review): relies on xmlrpclib, string.replace and base64.encodestring,
    so this module is Python 2 only — confirm before any Python 3 port.
    """

    def __init__(self, username=None, password=None):
        xmlrpclib.Transport.__init__(self)
        self.username = username
        self.password = password

    def send_auth(self, h):
        # Only emit the header when both credentials were supplied.
        # encodestring appends newlines ("\012"); strip them so the header
        # value stays on a single line.
        if self.username is not None and self.password is not None:
            h.putheader('AUTHORIZATION', "Basic %s" % string.replace(
                encodestring("%s:%s" % (self.username, self.password)),
                "\012", ""
            ))

    def single_request(self, host, handler, request_body, verbose=0):
        # issue XML-RPC request
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)
        try:
            self.send_request(h, handler, request_body)
            self.send_host(h, host)
            self.send_user_agent(h)
            # Inject the Basic-Auth header alongside the standard ones.
            self.send_auth(h)
            self.send_content(h, request_body)
            response = h.getresponse(buffering=True)
            if response.status == 200:
                self.verbose = verbose
                return self.parse_response(response)
        except xmlrpclib.Fault:
            # XML-RPC level faults propagate unchanged.
            raise
        except Exception:
            # Transport-level failure: drop the connection before re-raising.
            self.close()
            raise

        #discard any response data and raise exception
        if response.getheader("content-length", 0):
            response.read()
        raise xmlrpclib.ProtocolError(
            host + handler,
            response.status, response.reason,
            response.msg,
        )
|
aam-at/tensorflow | refs/heads/master | tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver_test.py | 14 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SlurmClusterResolver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import expand_hostlist
from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import expand_tasks_per_node
from tensorflow.python.distribute.cluster_resolver.slurm_cluster_resolver import SlurmClusterResolver
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
mock = test.mock
class SlurmClusterResolverTest(test.TestCase):
  """Tests for SlurmClusterResolver and its hostlist/task-count parsers.

  Consistency fix: the two bare ``assert`` statements on environment
  variables are replaced with ``self.assertEqual`` to match the unittest
  style used everywhere else in the class (bare asserts are also stripped
  under ``python -O``).
  """

  def test_expand_hostlist(self):
    """Slurm hostlist syntax expands to explicit host names."""
    self.assertEqual(expand_hostlist('n1'), ['n1'])
    self.assertEqual(expand_hostlist('n[1,3]'), ['n1', 'n3'])
    self.assertEqual(expand_hostlist('n[1-3]'), ['n1', 'n2', 'n3'])
    self.assertEqual(
        expand_hostlist('n[1-2],m5,o[3-4,6,7-9]'),
        ['n1', 'n2', 'm5', 'o3', 'o4', 'o6', 'o7', 'o8', 'o9'])

  def test_expand_tasks_per_node(self):
    """Slurm 'count(xrepeat)' syntax expands to a flat per-node list."""
    self.assertEqual(expand_tasks_per_node('2'), [2])
    self.assertEqual(expand_tasks_per_node('2,1,3'), [2, 1, 3])
    self.assertEqual(expand_tasks_per_node('3(x2),2,1'), [3, 3, 2, 1])
    self.assertEqual(
        expand_tasks_per_node('3(x2),2,11(x4)'), [3, 3, 2, 11, 11, 11, 11])
    self.assertEqual(
        expand_tasks_per_node('13(x10)'),
        [13, 13, 13, 13, 13, 13, 13, 13, 13, 13])

  def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
    # Check the spec itself and all round-trips through ClusterSpec.
    self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        server_lib.ClusterSpec(cluster_spec).as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())

  @mock.patch.dict(
      os.environ, {
          'SLURM_PROCID': '0',
          'SLURM_STEP_NUM_TASKS': '3',
          'SLURM_STEP_TASKS_PER_NODE': '1(x3)',
          'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
          'CUDA_VISIBLE_DEVICES': '0',
      })
  def testSimpleRetrievalFromEnv(self):
    """Resolver defaults are derived entirely from SLURM_* variables."""
    slurm_cluster_resolver = SlurmClusterResolver()

    actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
    expected_proto = """
    job { name: 'worker' tasks { key: 0 value: 't02n13:8888' }
                         tasks { key: 1 value: 't02n41:8888' }
                         tasks { key: 2 value: 't02n43:8888' } }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    self.assertEqual(
        slurm_cluster_resolver.master('worker', 0, rpc_layer='grpc'),
        'grpc://t02n13:8888')
    self.assertEqual(slurm_cluster_resolver.num_accelerators(), {'GPU': 1})
    self.assertEqual(os.environ['CUDA_VISIBLE_DEVICES'], '0')

  @mock.patch.dict(
      os.environ, {
          'SLURM_PROCID': '0',
          'SLURM_STEP_NUM_TASKS': '3',
          'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
      })
  def testSimpleSuccessfulRetrieval(self):
    """Explicit jobs/ports yield the expected two-job cluster spec."""
    slurm_cluster_resolver = SlurmClusterResolver(
        jobs={
            'ps': 1,
            'worker': 2
        },
        port_base=8888,
        tasks_per_node=1,
        gpus_per_node=1,
        gpus_per_task=1,
        auto_set_gpu=False)

    actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
    expected_proto = """
    job { name: 'ps' tasks { value: 't02n13:8888' } }
    job { name: 'worker' tasks { key: 0 value: 't02n41:8888' }
                         tasks { key: 1 value: 't02n43:8888' } }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)

  @mock.patch.dict(
      os.environ, {
          'SLURM_PROCID': '0',
          'SLURM_STEP_NUM_TASKS': '3',
          'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
      })
  def testSimpleMasterRetrieval(self):
    """master() honours task_type/task_id and the rpc_layer settings."""
    slurm_cluster_resolver = SlurmClusterResolver(
        jobs={
            'ps': 1,
            'worker': 2
        },
        port_base=8888,
        tasks_per_node=1,
        gpus_per_node=1,
        gpus_per_task=1,
        auto_set_gpu=False)
    slurm_cluster_resolver.task_type = 'worker'
    slurm_cluster_resolver.task_id = 1
    self.assertEqual(slurm_cluster_resolver.master(), 'grpc://t02n43:8888')

    slurm_cluster_resolver.rpc_layer = 'ab'
    self.assertEqual(slurm_cluster_resolver.master('ps', 0), 'ab://t02n13:8888')
    self.assertEqual(
        slurm_cluster_resolver.master('ps', 0, rpc_layer='test'),
        'test://t02n13:8888')

  @mock.patch.dict(
      os.environ, {
          'SLURM_PROCID': '0',
          'SLURM_STEP_NUM_TASKS': '3',
          'SLURM_STEP_TASKS_PER_NODE': '1(x3)',
          'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
      })
  def testTaskPerNodeNotSetRetrieval(self):
    """tasks_per_node falls back to SLURM_STEP_TASKS_PER_NODE."""
    slurm_cluster_resolver = SlurmClusterResolver(
        jobs={
            'ps': 1,
            'worker': 2
        },
        port_base=8888,
        gpus_per_node=1,
        gpus_per_task=1,
        auto_set_gpu=False)

    actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
    expected_proto = """
    job { name: 'ps' tasks { value: 't02n13:8888' } }
    job { name: 'worker' tasks { key: 0 value: 't02n41:8888' }
                         tasks { key: 1 value: 't02n43:8888' } }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)

  @mock.patch.dict(
      os.environ, {
          'SLURM_PROCID': '1',
          'SLURM_STEP_NUM_TASKS': '5',
          'SLURM_STEP_TASKS_PER_NODE': '2(x2),1',
          'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
          'CUDA_VISIBLE_DEVICES': '',
      })
  def testMultiTaskPerNodeRetrieval(self):
    """Nodes hosting several tasks get consecutive ports per task."""
    slurm_cluster_resolver = SlurmClusterResolver(
        jobs={
            'ps': 1,
            'worker': 4
        },
        port_base=8888,
        gpus_per_node=2,
        gpus_per_task=1,
        auto_set_gpu=True)

    actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
    expected_proto = """
    job { name: 'ps' tasks { value: 't02n13:8888' } }
    job { name: 'worker' tasks { key: 0 value: 't02n13:8889' }
                         tasks { key: 1 value: 't02n41:8888' }
                         tasks { key: 2 value: 't02n41:8889' }
                         tasks { key: 3 value: 't02n43:8888' } }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    self.assertEqual(os.environ['CUDA_VISIBLE_DEVICES'], '1')

  @mock.patch.dict(
      os.environ, {
          'SLURM_PROCID': '1',
          'SLURM_STEP_NUM_TASKS': '5',
          'SLURM_STEP_TASKS_PER_NODE': '2(x2),1',
          'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
          'CUDA_VISIBLE_DEVICES': '',
      })
  def testMultipleGpusPerTaskRetrieval(self):
    """gpus_per_task > 1 assigns a contiguous GPU range to each task."""
    slurm_cluster_resolver = SlurmClusterResolver(
        jobs={
            'ps': 1,
            'worker': 4
        },
        port_base=8888,
        gpus_per_node=4,
        gpus_per_task=2,
        auto_set_gpu=True)

    actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
    expected_proto = """
    job { name: 'ps' tasks { value: 't02n13:8888' } }
    job { name: 'worker' tasks { key: 0 value: 't02n13:8889' }
                         tasks { key: 1 value: 't02n41:8888' }
                         tasks { key: 2 value: 't02n41:8889' }
                         tasks { key: 3 value: 't02n43:8888' } }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    self.assertEqual(os.environ['CUDA_VISIBLE_DEVICES'], '2,3')
# Standard TensorFlow test entry point when run as a script.
if __name__ == '__main__':
  test.main()
|
GunoH/intellij-community | refs/heads/master | python/testData/intentions/PyConvertTypeCommentToVariableAnnotationIntentionTest/assignmentWithComplexUnpacking.py | 31 | [y, (x, (z))] = undefined() # ty<caret>pe: Optional[Union[None, Any]], (Callable[..., int], Any) |
glaunay/pyproteinsExt | refs/heads/master | src/pyproteinsExt/structure/__init__.py | 1 | from .coordinates import Parser
from .ccmap_wrapper import ccmap |
PythonProgramming/Kivy-Basics-Tutorials | refs/heads/master | KivyVideo6.py | 1 | from kivy.app import App
#kivy.require("1.8.0")
from kivy.uix.floatlayout import FloatLayout
class SimpleKivy4(App):
    """Minimal Kivy application whose root widget is an empty FloatLayout."""

    def build(self):
        # The returned widget becomes the root of the application widget tree.
        return FloatLayout()
# Launch the app only when executed directly, not when imported.
if __name__ == "__main__":
    SimpleKivy4().run()
|
ellonweb/merlin | refs/heads/master | Hooks/intel/search.py | 2 | # This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from sqlalchemy import and_, or_
from Core.db import session
from Core.maps import Planet, Alliance, Intel
from Core.loadable import loadable, route
class search(loadable):
    """Search for a planet by alliance or nick."""
    usage = " <alliance|nick>"

    @route(r"(\S+)", access = "member")
    def execute(self, message, user, params):
        # Wrap the search term in SQL wildcards for a substring ILIKE match.
        param = "%"+params.group(1)+"%"
        Q = session.query(Planet, Intel, Alliance)
        Q = Q.join(Planet.intel)
        # Outer join: planets with intel but no alliance still match on nick.
        Q = Q.outerjoin(Intel.alliance)
        Q = Q.filter(Planet.active == True)
        Q = Q.filter(or_(Intel.nick.ilike(param), and_(Alliance.name.ilike(param), Alliance.active == True)))
        # Fetch one extra row (6) so we can tell whether the first 5 were
        # truncated and warn the user accordingly.
        result = Q[:6]
        if len(result) < 1:
            message.reply("No planets in intel matching nick or alliance: %s"%(params.group(1),))
            return
        replies = []
        for planet, intel, alliance in result[:5]:
            reply="%s:%s:%s (%s)" % (planet.x,planet.y,planet.z,planet.race)
            reply+=" Score: %s Value: %s Size: %s" % (planet.score,planet.value,planet.size)
            # Optional intel fields are appended only when present.
            if intel.nick:
                reply+=" Nick: %s" % (intel.nick,)
            if alliance:
                reply+=" Alliance: %s" % (alliance.name,)
            if intel.reportchan:
                reply+=" Reportchan: %s" % (intel.reportchan,)
            if intel.comment:
                reply+=" Comment: %s" % (intel.comment,)
            replies.append(reply)
        if len(result) > 5:
            replies[-1]+=" (Too many results to list, please refine your search)"
        message.reply("\n".join(replies))
|
boberfly/gaffer | refs/heads/master | python/GafferUI/GLWidget.py | 2 | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import logging
import collections
# the OpenGL module loves spewing things into logs, and for some reason
# when running in maya 2012 the default log level allows info messages through.
# so we set a specific log level on the OpenGL logger to keep it quiet.
logging.getLogger( "OpenGL" ).setLevel( logging.WARNING )
import imath
import IECore
import IECoreGL
import Gaffer
import GafferUI
from . import _GafferUI
import OpenGL.GL as GL
import Qt
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
from Qt import QtOpenGL
## The GLWidget is a base class for all widgets which wish to draw using OpenGL.
# Derived classes override the _draw() method to achieve this.
## The GLWidget is a base class for all widgets which wish to draw using OpenGL.
# Derived classes override the _draw() method to achieve this.
class GLWidget( GafferUI.Widget ) :

	## This enum defines the optional elements of the GL buffer used
	# for display.
	BufferOptions = IECore.Enum.create(
		"Alpha",
		"Depth",
		"Double",
		"AntiAlias"
	)

	## Note that you won't always get the buffer options you ask for - a best fit is found
	# among the available formats. In particular it appears that a depth buffer is often present
	# even when not requested.
	def __init__( self, bufferOptions = set(), **kw ) :

		format = QtOpenGL.QGLFormat()
		format.setRgba( True )

		format.setAlpha( self.BufferOptions.Alpha in bufferOptions )
		format.setDepth( self.BufferOptions.Depth in bufferOptions )
		format.setDoubleBuffer( self.BufferOptions.Double in bufferOptions )

		self.__multisample = self.BufferOptions.AntiAlias in bufferOptions
		if self.__multisample:
			format.setSampleBuffers( True )
			format.setSamples( 8 )

		# setVersion doesn't exist in qt prior to 4.7.
		if hasattr( format, "setVersion" ) :
			format.setVersion( 2, 1 )

		graphicsView = _GLGraphicsView( format )
		self.__graphicsScene = _GLGraphicsScene( graphicsView, Gaffer.WeakMethod( self.__draw ) )
		graphicsView.setScene( self.__graphicsScene )

		GafferUI.Widget.__init__( self, graphicsView, **kw )

		self.__overlays = set()

	## Adds a widget to be overlaid on top of the GL rendering,
	# stretched to fill the frame.
	def addOverlay( self, overlay ) :

		if overlay in self.__overlays :
			return

		oldParent = overlay.parent()
		if oldParent is not None :
			# BUG FIX : this previously referenced an undefined name
			# `child`, raising NameError whenever the overlay was being
			# reparented from another widget.
			oldParent.removeChild( overlay )

		self.__overlays.add( overlay )
		overlay._setStyleSheet()

		if Qt.__binding__ in ( "PySide2", "PyQt5" ) :

			# Force Qt to use a raster drawing path for the overlays.
			#
			# - On Mac, this avoids "QMacCGContext:: Unsupported painter devtype type 1"
			#   errors. See https://bugreports.qt.io/browse/QTBUG-32639 for
			#   further details.
			# - On Linux, this avoids an unknown problem which manifests as
			#   a GL error that appears to occur inside Qt's code, and which
			#   is accompanied by text drawing being scrambled in the overlay.
			#
			## \todo When we no longer need to support Qt4, we should be
			# able to stop using a QGLWidget for the viewport, and this
			# should no longer be needed.
			overlay._qtWidget().setWindowOpacity( 0.9999 )

		self.__graphicsScene.addOverlay( overlay )

	## Removes a previously added overlay.
	def removeOverlay( self, overlay ) :

		self.removeChild( overlay )

	def removeChild( self, child ) :

		assert( child in self.__overlays )
		self.__graphicsScene.removeOverlay( child )
		self.__overlays.remove( child )

	## Called whenever the widget is resized. May be reimplemented by derived
	# classes if necessary. The appropriate OpenGL context will already be current
	# when this is called.
	def _resize( self, size ) :

		GL.glViewport( 0, 0, size.x, size.y )

	## Derived classes must override this to draw their contents using
	# OpenGL calls. The appropriate OpenGL context will already be current
	# when this is called.
	def _draw( self ) :

		pass

	## Derived classes may call this when they wish to trigger a redraw.
	def _redraw( self ) :

		self._glWidget().update()

	## May be used by derived classes to get access to the internal
	# QGLWidget. Note that _makeCurrent() should be used in preference
	# to _glWidget().makeCurrent(), for the reasons stated in the
	# documentation for that method.
	def _glWidget( self ) :

		return self._qtWidget().viewport()

	## May be used by derived classes to make the OpenGL context
	# for this widget current. Returns True if the operation was
	# successful and False if not. In an ideal world, the return
	# value would always be True, but it appears that there are
	# Qt/Mac bugs which cause it not to be from time to time -
	# typically for newly created Widgets. If False is returned,
	# no OpenGL operations should be undertaken subsequently by
	# the caller.
	def _makeCurrent( self ) :

		self._qtWidget().viewport().makeCurrent()
		return self.__framebufferValid()

	def __framebufferValid( self ) :

		import OpenGL.GL.framebufferobjects
		return GL.framebufferobjects.glCheckFramebufferStatus( GL.framebufferobjects.GL_FRAMEBUFFER ) == GL.framebufferobjects.GL_FRAMEBUFFER_COMPLETE

	def __draw( self ) :

		if not self.__framebufferValid() :
			return

		# we need to call the init method after a GL context has been
		# created, and this seems like the only place that is guaranteed.
		# calling it here does mean we call init() way more than needed,
		# but it's safe.
		## \todo: this might be removable if we can prove resizeEvent
		# is always called first.
		IECoreGL.init( True )

		if self.__multisample:
			GL.glEnable( GL.GL_MULTISAMPLE )

		self._draw()
class _GLGraphicsView( QtWidgets.QGraphicsView ) :
    """A QGraphicsView whose viewport is a QGLWidget, so that scenes can
    render GL backgrounds. When running inside Maya or Houdini the GL
    widget is created with a context shared with the host application so
    IECoreGL resources can be shared between Gaffer and host viewports."""

    def __init__( self, format ) :

        QtWidgets.QGraphicsView.__init__( self )

        # Scrollbars are never wanted - the scene always fills the view.
        self.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
        self.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )

        glWidget = self.__createQGLWidget( format )

        # On mac, we need to hide the GL widget until the last
        # possible moment, otherwise we get "invalid drawable"
        # errors spewing all over the place. See event() for the
        # spot where we show the widget.
        glWidget.hide()

        self.setViewport( glWidget )
        self.setViewportUpdateMode( self.FullViewportUpdate )

    # QAbstractScrollArea (one of our base classes), implements
    # minimumSizeHint() to include enough room for scrollbars.
    # But we know we'll never show scrollbars, and don't want
    # a minimum size, so we reimplement it.
    def minimumSizeHint( self ) :

        return QtCore.QSize()

    def event( self, event ) :

        if event.type() == event.PolishRequest :
            # This seems to be the one signal that reliably
            # lets us know we're becoming genuinely visible
            # on screen. We use it to show the GL widget we
            # hid in our constructor.
            self.viewport().show()

        return QtWidgets.QGraphicsView.event( self, event )

    def resizeEvent( self, event ) :

        if self.scene() is not None :

            self.scene().setSceneRect( 0, 0, event.size().width(), event.size().height() )
            owner = GafferUI.Widget._owner( self )

            # clear any existing errors that may trigger
            # error checking code in _resize implementations.
            while GL.glGetError() :
                pass

            owner._makeCurrent()

            # We need to call the init method after a GL context has been
            # created, but before any events requiring GL have been triggered.
            # We had been doing this from GLWidget.__draw(), but it was still
            # possible to trigger mouseMove events prior to drawing by hovering
            # over top of an about-to-become-visible GLWidget. resizeEvent
            # seems to always be triggered prior to both draw and mouseMove,
            # ensuring GL is initialized in time for those other events.
            # Calling it here does mean we call init() more than needed,
            # but it's safe.
            IECoreGL.init( True )

            owner._resize( imath.V2i( event.size().width(), event.size().height() ) )

    def keyPressEvent( self, event ) :

        # We have to reimplement this method to prevent QAbstractScrollArea
        # from stealing the cursor keypresses, preventing them from
        # being used by GLWidget subclasses. QAbstractScrollArea uses
        # those keypresses to move the scrollbars, but we don't want the
        # scrolling functionality at all. Our implementation of this method
        # is functionally identical to the QGraphicsView one, except it
        # passes unused events to QFrame, bypassing QAbstractScrollArea.
        if self.scene() is not None and self.isInteractive() :
            QtWidgets.QApplication.sendEvent( self.scene(), event )
            if event.isAccepted() :
                return

        QtWidgets.QFrame.keyPressEvent( self, event )

    # We keep a single hidden widget which owns the texture and display lists
    # and then share those with all the widgets we really want to make.
    __shareWidget = None

    @classmethod
    def __createQGLWidget( cls, format ) :

        # try to make a host specific widget if necessary.
        result = cls.__createMayaQGLWidget( format )
        if result is not None :
            return result

        result = cls.__createHoudiniQGLWidget( format )
        if result is not None :
            return result

        # and if it wasn't necessary, just breathe a sigh of relief
        # and make a nice normal one.
        if cls.__shareWidget is None :
            cls.__shareWidget = QtOpenGL.QGLWidget()

        return QtOpenGL.QGLWidget( format, shareWidget = cls.__shareWidget )

    @classmethod
    def __createHostedQGLWidget( cls, format ) :

        # When running Gaffer embedded in a host application such as Maya
        # or Houdini, we want to be able to share OpenGL resources between
        # gaffer uis and host viewport uis, because IECoreGL will be used
        # in both. So we implement our own QGLContext class which creates a
        # context which shares with the host. The custom QGLContext is
        # implemented in GLWidgetBinding.cpp, and automatically shares with
        # the context which is current at the time of its creation. The host
        # context should therefore be made current before calling this
        # method.
        result = QtOpenGL.QGLWidget()
        _GafferUI._glWidgetSetHostedContext( GafferUI._qtAddress( result ), GafferUI._qtAddress( format ) )
        return result

    @classmethod
    def __createMayaQGLWidget( cls, format ) :

        try :
            import maya.OpenMayaRender
        except ImportError :
            # we're not in maya - createQGLWidget() will just make a
            # normal widget.
            return None

        # Make Maya's resource context current so the hosted widget
        # shares with it.
        mayaRenderer = maya.OpenMayaRender.MHardwareRenderer.theRenderer()
        mayaRenderer.makeResourceContextCurrent( mayaRenderer.backEndString() )
        return cls.__createHostedQGLWidget( format )

    @classmethod
    def __createHoudiniQGLWidget( cls, format ) :

        try :
            import hou
        except ImportError :
            # we're not in houdini - createQGLWidget() will just make a
            # normal widget.
            return None

        import IECoreHoudini

        if hasattr( IECoreHoudini, "sharedGLWidget" ) :
            # In Houdini 14 and 15, Qt is the native UI, and we can access
            # Houdini's shared QGLWidget directly.
            return QtOpenGL.QGLWidget( format, shareWidget = GafferUI._qtObject( IECoreHoudini.sharedGLWidget(), QtOpenGL.QGLWidget ) )

        # While Qt is the native UI in Houdini 16.0, they have moved away
        # from QGLWidgets for their Qt5 builds, so we need to force the
        # Houdini GL context to be current, and share it.
        IECoreHoudini.makeMainGLContextCurrent()
        return cls.__createHostedQGLWidget( format )
class _GLGraphicsScene( QtWidgets.QGraphicsScene ) :
    """QGraphicsScene which draws its background via a user-supplied GL
    callback, and manages overlay widgets kept resized to fill the scene."""

    __Overlay = collections.namedtuple( "__Overlay", [ "widget", "proxy" ] )

    def __init__( self, parent, backgroundDrawFunction ) :

        QtWidgets.QGraphicsScene.__init__( self, parent )

        # Callable invoked inside native painting to draw the GL background.
        self.__backgroundDrawFunction = backgroundDrawFunction
        self.sceneRectChanged.connect( self.__sceneRectChanged )

        self.__overlays = {} # Mapping from GafferUI.Widget to _OverlayProxyWidget

    def addOverlay( self, widget ) :
        """Add a GafferUI.Widget as an overlay filling the scene."""

        if widget._qtWidget().layout() is not None :
            # removing the size constraint is necessary to keep the widget the
            # size we tell it to be in __updateItemGeometry.
            widget._qtWidget().layout().setSizeConstraint( QtWidgets.QLayout.SetNoConstraint )

        proxy = _OverlayProxyWidget()
        proxy.setWidget( widget._qtWidget() )
        self.__overlays[widget] = proxy

        self.addItem( proxy )
        self.__updateItemGeometry( proxy, self.sceneRect() )

    def removeOverlay( self, widget ) :
        """Remove a previously-added overlay widget."""

        item = self.__overlays[widget]
        # Detach the Qt widget before removal so the proxy doesn't
        # destroy it along with itself.
        item.setWidget( None )
        self.removeItem( item )
        del self.__overlays[widget]

    def drawBackground( self, painter, rect ) :

        painter.beginNativePainting()

        # Qt sometimes enters this method with a GL error flag still set.
        # We unset it here so it won't trigger our own error checking.
        while GL.glGetError() :
            pass

        # Save/restore all GL state so the background callback can't
        # disturb Qt's own rendering.
        GL.glPushAttrib( GL.GL_ALL_ATTRIB_BITS )
        GL.glPushClientAttrib( GL.GL_CLIENT_ALL_ATTRIB_BITS )

        self.__backgroundDrawFunction()

        GL.glPopClientAttrib()
        GL.glPopAttrib()

        painter.endNativePainting()

    ## QGraphicsScene consumes all drag events by default, which is unhelpful
    # for us as it breaks any Qt based drag-drop we may be attempting.
    def dragEnterEvent( self, event ) :

        event.ignore()

    def __sceneRectChanged( self, sceneRect ) :

        # Keep every overlay resized to match the new scene rect.
        for proxy in self.__overlays.values() :
            self.__updateItemGeometry( proxy, sceneRect )

    def __updateItemGeometry( self, item, sceneRect ) :

        item.widget().setGeometry( QtCore.QRect( 0, 0, sceneRect.width(), sceneRect.height() ) )
## A QGraphicsProxyWidget whose shape is composed from the
# bounds of its child widgets. This allows our overlays to
# pass through events in the regions where there isn't a
# child widget.
class _OverlayProxyWidget( QtWidgets.QGraphicsProxyWidget ) :
    """A QGraphicsProxyWidget whose shape is composed from the bounds of
    its child widgets. This allows our overlays to pass through events in
    the regions where there isn't a child widget."""

    def __init__( self ) :

        QtWidgets.QGraphicsProxyWidget.__init__( self )

    def shape( self ) :

        path = QtGui.QPainterPath()
        # Only the region actually covered by child widgets is hit-testable,
        # so events over empty areas fall through to the scene beneath.
        path.addRegion( self.widget().childrenRegion() )
        return path
|
2013Commons/hue | refs/heads/master | desktop/core/ext-py/Django-1.4.5/tests/regressiontests/m2m_through_regress/__init__.py | 45382 | |
btabibian/scikit-learn | refs/heads/master | sklearn/linear_model/sag.py | 30 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
                       n_samples=None,
                       is_saga=False):
    """Compute automatic step size for SAG solver

    The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares for over all samples.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.

    alpha_scaled : float
        Constant that multiplies the regularization term, scaled by
        1. / n_samples, the number of samples.

    loss : string, in {"log", "squared", "multinomial"}
        The loss function used in SAG solver.

    fit_intercept : bool
        Specifies if a constant (a.k.a. bias or intercept) will be
        added to the decision function.

    n_samples : int, optional
        Number of rows in X. Useful if is_saga=True.

    is_saga : boolean, optional
        Whether to return step size for the SAGA algorithm or the SAG
        algorithm.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives
    https://arxiv.org/abs/1407.0202
    """
    if loss in ('log', 'multinomial'):
        # 0.25 bounds the curvature of the (multinomial) logistic loss,
        # giving this Lipschitz constant for the gradient.
        L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
    elif loss == 'squared':
        # inverse Lipschitz constant for squared loss
        L = max_squared_sum + int(fit_intercept) + alpha_scaled
    else:
        # BUGFIX: the message previously omitted 'multinomial', which is
        # accepted above; list all valid losses.
        raise ValueError("Unknown loss function for SAG solver, got %s "
                         "instead of 'log', 'squared' or 'multinomial'"
                         % loss)
    if is_saga:
        # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
        # See Defazio et al. 2014
        mun = min(2 * n_samples * alpha_scaled, L)
        step = 1. / (2 * L + mun)
    else:
        # SAG theoretical step size is 1/16L but it is recommended to use 1 / L
        # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
        # slide 65
        step = 1. / L
    return step
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
               max_iter=1000, tol=0.001, verbose=0, random_state=None,
               check_input=True, max_squared_sum=None,
               warm_start_mem=None,
               is_saga=False):
    """SAG solver for Ridge and LogisticRegression

    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.

    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data

    y : numpy array, shape (n_samples,)
        Target values. With loss='multinomial', y must be label encoded
        (see preprocessing.LabelEncoder).

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples (1. for unweighted).

    loss : 'log' | 'squared' | 'multinomial'
        Loss function that will be optimized:
        -'log' is the binary logistic loss, as used in LogisticRegression.
        -'squared' is the squared loss, as used in Ridge.
        -'multinomial' is the multinomial logistic loss, as used in
         LogisticRegression.

        .. versionadded:: 0.18
           *loss='multinomial'*

    alpha : float, optional
        Constant that multiplies the regularization term. Defaults to 1.

    max_iter : int, optional
        The max number of passes over the training data if the stopping
        criteria is not reached. Defaults to 1000.

    tol : double, optional
        The stopping criteria for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol. Defaults to .001

    verbose : integer, optional
        The verbosity level.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.

    warm_start_mem : dict, optional
        The initialization parameters used for warm starting. Warm starting is
        currently used in LogisticRegression but not in Ridge.
        It contains:
            - 'coef': the weight vector, with the intercept in last line
                if the intercept is fitted.
            - 'gradient_memory': the scalar gradient for all seen samples.
            - 'sum_gradient': the sum of gradient over all seen samples,
                for each feature.
            - 'intercept_sum_gradient': the sum of gradient over all seen
                samples, for the intercept.
            - 'seen': array of boolean describing the seen samples.
            - 'num_seen': the number of seen samples.

    is_saga : boolean, optional
        Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allow for l1 regularisation.

    Returns
    -------
    coef_ : array, shape (n_features)
        Weight vector.

    n_iter_ : int
        The number of full pass on all samples.

    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and possibly the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> X = np.random.randn(n_samples, n_features)
    >>> y = np.random.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='sag', tol=0.001)

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    LogisticRegression(C=1.0, class_weight=None, dual=False,
        fit_intercept=True, intercept_scaling=1, max_iter=100,
        multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
        solver='sag', tol=0.0001, verbose=0, warm_start=False)

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives
    https://arxiv.org/abs/1407.0202

    See also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000

    if check_input:
        X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')

    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples

    # if loss == 'multinomial', y should be label encoded.
    n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1

    # initialization
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float64, order='C')

    if 'coef' in warm_start_mem.keys():
        coef_init = warm_start_mem['coef']
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
                             order='C')

    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=np.float64)

    if 'intercept_sum_gradient' in warm_start_mem.keys():
        intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)

    if 'gradient_memory' in warm_start_mem.keys():
        gradient_memory_init = warm_start_mem['gradient_memory']
    else:
        gradient_memory_init = np.zeros((n_samples, n_classes),
                                        dtype=np.float64, order='C')
    if 'sum_gradient' in warm_start_mem.keys():
        sum_gradient_init = warm_start_mem['sum_gradient']
    else:
        sum_gradient_init = np.zeros((n_features, n_classes),
                                     dtype=np.float64, order='C')

    if 'seen' in warm_start_mem.keys():
        seen_init = warm_start_mem['seen']
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order='C')

    if 'num_seen' in warm_start_mem.keys():
        num_seen_init = warm_start_mem['num_seen']
    else:
        num_seen_init = 0

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
                                   fit_intercept, n_samples=n_samples,
                                   is_saga=is_saga)
    # A step size exactly equal to 1 / alpha_scaled would make the
    # just-in-time weight rescaling in the Cython routine divide by zero.
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError("Current sag implementation does not handle "
                                "the case step_size * alpha_scaled == 1")

    # The heavy lifting happens in the Cython routine; all buffers above
    # are updated in place so they can be returned for warm starting.
    num_seen, n_iter_ = sag(dataset, coef_init,
                            intercept_init, n_samples,
                            n_features, n_classes, tol,
                            max_iter,
                            loss,
                            step_size, alpha_scaled,
                            beta_scaled,
                            sum_gradient_init,
                            gradient_memory_init,
                            seen_init,
                            num_seen_init,
                            fit_intercept,
                            intercept_sum_gradient,
                            intercept_decay,
                            is_saga,
                            verbose)

    if n_iter_ == max_iter:
        warnings.warn("The max_iter was reached which means "
                      "the coef_ did not converge", ConvergenceWarning)

    if fit_intercept:
        # Store the intercept as the last row, matching the warm-start
        # 'coef' layout documented above.
        coef_init = np.vstack((coef_init, intercept_init))

    warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
                      'intercept_sum_gradient': intercept_sum_gradient,
                      'gradient_memory': gradient_memory_init,
                      'seen': seen_init, 'num_seen': num_seen}

    if loss == 'multinomial':
        coef_ = coef_init.T
    else:
        coef_ = coef_init[:, 0]

    return coef_, n_iter_, warm_start_mem
|
lem9/weblate | refs/heads/master | weblate/accounts/migrations/0016_add-api-keys.py | 3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-22 10:23
from __future__ import unicode_literals
from django.db import migrations
from django.utils.crypto import get_random_string
def add_api_keys(apps, schema_editor):
    """Ensure every existing user has a REST API token.

    Users who already own a token are left untouched; everyone else
    gets a freshly generated 40-character key.
    """
    token_model = apps.get_model('authtoken', 'Token')
    user_model = apps.get_model('auth', 'User')
    for account in user_model.objects.iterator():
        token_model.objects.get_or_create(
            user=account,
            defaults={'key': get_random_string(40)},
        )
class Migration(migrations.Migration):
    """Data migration backfilling DRF auth tokens for pre-existing users."""

    dependencies = [
        ('accounts', '0015_auto_20160304_1418'),
        ('authtoken', '0001_initial'),
        ('auth', '0001_initial'),
    ]

    operations = [
        # Forward-only data migration; no reverse function is supplied.
        migrations.RunPython(add_api_keys),
    ]
|
freedomhui/cinder | refs/heads/localstorage | cinder/tests/test_test.py | 8 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the testing base code."""
from cinder.openstack.common import rpc
from cinder import test
class IsolationTestCase(test.TestCase):
    """Ensure that things are cleaned up after failed tests.

    These tests don't really do much here, but if isolation fails a bunch
    of other tests should fail.
    """
    def test_service_isolation(self):
        # Starting a service should not leak into subsequent tests.
        self.start_service('volume')

    def test_rpc_consumer_isolation(self):
        class NeverCalled(object):

            def __getattribute__(*args):
                # Any attribute access means a message was delivered to
                # this proxy - i.e. a consumer leaked between tests.
                assert False, "I should never get called."

        connection = rpc.create_connection(new=True)
        proxy = NeverCalled()
        connection.create_consumer('volume', proxy, fanout=False)
        connection.consume_in_thread()
|
alvarolopez/nova | refs/heads/master | nova/api/openstack/compute/schemas/v3/flavors_extraspecs.py | 109 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
# NOTE(oomichi): The metadata of flavor_extraspecs should accept numbers
# as its values.
metadata = copy.deepcopy(parameter_types.metadata)
metadata['patternProperties']['^[a-zA-Z0-9-_:. ]{1,255}$']['type'] = \
    ['string', 'number']

# JSON schema for creating extra specs: a single required 'extra_specs'
# object and nothing else.
create = {
    'type': 'object',
    'properties': {
        'extra_specs': metadata
    },
    'required': ['extra_specs'],
    'additionalProperties': False,
}

# JSON schema for updating one extra spec: exactly one key/value pair
# per request.
update = copy.deepcopy(metadata)
update.update({
    'minProperties': 1,
    'maxProperties': 1
})
|
Ballz0fSteel/Umeko | refs/heads/master | lib/youtube_dl/extractor/viu.py | 25 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_kwargs,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
)
class ViuBaseIE(InfoExtractor):
    """Shared base for Viu extractors: guest authentication and
    authenticated JSON API calls."""

    def _real_initialize(self):
        # Authenticate as a guest; the X-VIU-AUTH response header carries
        # the token required by every subsequent API call.
        viu_auth_res = self._request_webpage(
            'https://www.viu.com/api/apps/v2/authenticate', None,
            'Requesting Viu auth', query={
                'acct': 'test',
                'appid': 'viu_desktop',
                'fmt': 'json',
                'iid': 'guest',
                'languageid': 'default',
                'platform': 'desktop',
                'userid': 'guest',
                'useridtype': 'guest',
                'ver': '1.0'
            }, headers=self.geo_verification_headers())
        self._auth_token = viu_auth_res.info()['X-VIU-AUTH']

    def _call_api(self, path, *args, **kwargs):
        # Merge geo-verification headers, the auth token, and any
        # caller-provided headers (caller headers take precedence).
        headers = self.geo_verification_headers()
        headers.update({
            'X-VIU-AUTH': self._auth_token
        })
        headers.update(kwargs.get('headers', {}))
        kwargs['headers'] = headers
        response = self._download_json(
            'https://www.viu.com/api/' + path, *args,
            **compat_kwargs(kwargs))['response']
        # The API signals errors in-band rather than via HTTP status.
        if response.get('status') != 'success':
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, response['message']), expected=True)
        return response
class ViuIE(ViuBaseIE):
    """Extractor for individual Viu media pages (and viu:<id> shortcuts)."""

    _VALID_URL = r'(?:viu:|https?://[^/]+\.viu\.com/[a-z]{2}/media/)(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.viu.com/en/media/1116705532?containerId=playlist-22168059',
        'info_dict': {
            'id': '1116705532',
            'ext': 'mp4',
            'title': 'Citizen Khan - Ep 1',
            'description': 'md5:d7ea1604f49e5ba79c212c551ce2110e',
        },
        'params': {
            'skip_download': 'm3u8 download',
        },
        'skip': 'Geo-restricted to India',
    }, {
        'url': 'https://www.viu.com/en/media/1130599965',
        'info_dict': {
            'id': '1130599965',
            'ext': 'mp4',
            'title': 'Jealousy Incarnate - Episode 1',
            'description': 'md5:d3d82375cab969415d2720b6894361e9',
        },
        'params': {
            'skip_download': 'm3u8 download',
        },
        'skip': 'Geo-restricted to Indonesia',
    }, {
        'url': 'https://india.viu.com/en/media/1126286865',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video_data = self._call_api(
            'clip/load', video_id, 'Downloading video data', query={
                'appid': 'viu_desktop',
                'fmt': 'json',
                'id': video_id
            })['item'][0]

        title = video_data['title']

        m3u8_url = None
        url_path = video_data.get('urlpathd') or video_data.get('urlpath')
        tdirforwhole = video_data.get('tdirforwhole')
        # #EXT-X-BYTERANGE is not supported by native hls downloader
        # and ffmpeg (#10955)
        # hls_file = video_data.get('hlsfile')
        hls_file = video_data.get('jwhlsfile')
        if url_path and tdirforwhole and hls_file:
            m3u8_url = '%s/%s/%s' % (url_path, tdirforwhole, hls_file)
        else:
            # m3u8_url = re.sub(
            #     r'(/hlsc_)[a-z]+(\d+\.m3u8)',
            #     r'\1whe\2', video_data['href'])
            m3u8_url = video_data['href']
        formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
        self._sort_formats(formats)

        # Subtitle URLs live in keys of the form subtitle_<lang>_<ext>.
        subtitles = {}
        for key, value in video_data.items():
            mobj = re.match(r'^subtitle_(?P<lang>[^_]+)_(?P<ext>(vtt|srt))', key)
            if not mobj:
                continue
            subtitles.setdefault(mobj.group('lang'), []).append({
                'url': value,
                'ext': mobj.group('ext')
            })

        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('description'),
            'series': video_data.get('moviealbumshowname'),
            'episode': title,
            'episode_number': int_or_none(video_data.get('episodeno')),
            'duration': int_or_none(video_data.get('duration')),
            'formats': formats,
            'subtitles': subtitles,
        }
class ViuPlaylistIE(ViuBaseIE):
    """Extractor for Viu playlist listings; yields one entry per item."""

    IE_NAME = 'viu:playlist'
    _VALID_URL = r'https?://www\.viu\.com/[^/]+/listing/playlist-(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.viu.com/en/listing/playlist-22461380',
        'info_dict': {
            'id': '22461380',
            'title': 'The Good Wife',
        },
        'playlist_count': 16,
        'skip': 'Geo-restricted to Indonesia',
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        playlist_data = self._call_api(
            'container/load', playlist_id,
            'Downloading playlist info', query={
                'appid': 'viu_desktop',
                'fmt': 'json',
                'id': 'playlist-' + playlist_id
            })['container']

        entries = []
        for item in playlist_data.get('item', []):
            item_id = item.get('id')
            if not item_id:
                continue
            item_id = compat_str(item_id)
            # Delegate each item to ViuIE via the viu:<id> shortcut.
            entries.append(self.url_result(
                'viu:' + item_id, 'Viu', item_id))

        return self.playlist_result(
            entries, playlist_id, playlist_data.get('title'))
class ViuOTTIE(InfoExtractor):
    """Extractor for Viu OTT (over-the-top) country-specific VOD pages.

    Uses a different API from ViuIE and so derives directly from
    InfoExtractor rather than ViuBaseIE.
    """

    IE_NAME = 'viu:ott'
    _VALID_URL = r'https?://(?:www\.)?viu\.com/ott/(?P<country_code>[a-z]{2})/[a-z]{2}-[a-z]{2}/vod/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.viu.com/ott/sg/en-us/vod/3421/The%20Prime%20Minister%20and%20I',
        'info_dict': {
            'id': '3421',
            'ext': 'mp4',
            'title': 'A New Beginning',
            'description': 'md5:1e7486a619b6399b25ba6a41c0fe5b2c',
        },
        'params': {
            'skip_download': 'm3u8 download',
        },
        'skip': 'Geo-restricted to Singapore',
    }, {
        'url': 'http://www.viu.com/ott/hk/zh-hk/vod/7123/%E5%A4%A7%E4%BA%BA%E5%A5%B3%E5%AD%90',
        'info_dict': {
            'id': '7123',
            'ext': 'mp4',
            'title': '這就是我的生活之道',
            'description': 'md5:4eb0d8b08cf04fcdc6bbbeb16043434f',
        },
        'params': {
            'skip_download': 'm3u8 download',
        },
        'skip': 'Geo-restricted to Hong Kong',
    }]

    def _real_extract(self, url):
        country_code, video_id = re.match(self._VALID_URL, url).groups()

        product_data = self._download_json(
            'http://www.viu.com/ott/%s/index.php' % country_code, video_id,
            'Downloading video info', query={
                'r': 'vod/ajax-detail',
                'platform_flag_label': 'web',
                'product_id': video_id,
            })['data']

        video_data = product_data.get('current_product')
        if not video_data:
            raise ExtractorError('This video is not available in your region.', expected=True)

        stream_data = self._download_json(
            'https://d1k2us671qcoau.cloudfront.net/distribute_web_%s.php' % country_code,
            video_id, 'Downloading stream info', query={
                'ccs_product_id': video_data['ccs_product_id'],
            })['data']['stream']

        stream_sizes = stream_data.get('size', {})
        formats = []
        for vid_format, stream_url in stream_data.get('url', {}).items():
            # Format ids look like e.g. 's720p'; pull the height out.
            height = int_or_none(self._search_regex(
                r's(\d+)p', vid_format, 'height', default=None))
            formats.append({
                'format_id': vid_format,
                'url': stream_url,
                'height': height,
                'ext': 'mp4',
                'filesize': int_or_none(stream_sizes.get(vid_format))
            })
        self._sort_formats(formats)

        subtitles = {}
        for sub in video_data.get('subtitle', []):
            sub_url = sub.get('url')
            if not sub_url:
                continue
            subtitles.setdefault(sub.get('name'), []).append({
                'url': sub_url,
                'ext': 'srt',
            })

        # NOTE(review): the 'synopsis' field is used as the title here,
        # while 'description' is taken separately below.
        title = video_data['synopsis'].strip()

        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('description'),
            'series': product_data.get('series', {}).get('name'),
            'episode': title,
            'episode_number': int_or_none(video_data.get('number')),
            'duration': int_or_none(stream_data.get('duration')),
            'thumbnail': video_data.get('cover_image_url'),
            'formats': formats,
            'subtitles': subtitles,
        }
|
Edzvu/Edzvu.github.io | refs/heads/master | APNSWrapper-0.6.1/APNSWrapper/notifications.py | 1 | # Copyright 2009 Max Klymyshyn, Sonettic
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import base64
import binascii
from __init__ import *
from connection import *
from apnsexceptions import *
from utils import _doublequote
NULL = 'null'
class APNSAlert(object):
    """
    Builder for the APNS 'alert' payload object, covering all the
    fields Apple defines. Setter methods return self for chaining;
    _build() serializes the configured fields to a JSON fragment.

    Note: this module is written for Python 2 (old-style raise
    statements and unicode checks are used throughout).
    """
    def __init__(self):
        self.alertBody = None
        self.actionLocKey = None
        self.locKey = None
        self.locArgs = None

    def body(self, alertBody):
        """
        The text of the alert message.
        """
        if alertBody and not isinstance(alertBody, str):
            raise APNSValueError, "Unexpected value of argument. It should be string or None."
        self.alertBody = alertBody
        return self

    def action_loc_key(self, alk = NULL):
        """
        If a string is specified, displays an alert with two buttons.
        """
        if alk and not isinstance(alk, str):
            raise APNSValueError, "Unexpected value of argument. It should be string or None."
        self.actionLocKey = alk
        return self

    def loc_key(self, lk):
        """
        A key to an alert-message string in a Localizable.strings file for the current
        localization (which is set by the user's language preference).
        """
        if lk and not isinstance(lk, str):
            raise APNSValueError, "Unexcpected value of argument. It should be string or None"
        self.locKey = lk
        return self

    def loc_args(self, la):
        """
        Variable string values to appear in place of the format specifiers in loc-key.
        """
        if la and not isinstance(la, (list, tuple)):
            raise APNSValueError, "Unexpected type of argument. It should be list or tuple of strings"
        # Each argument is JSON-quoted immediately, ready for _build().
        self.locArgs = [ '"%s"' % str(x) for x in la ]
        return self

    def _build(self):
        """
        Build object to JSON Apple Push Notification Service string.
        Only fields which have been set are emitted.
        """
        arguments = []
        if self.alertBody:
            arguments.append('"body":"%s"' % _doublequote(self.alertBody))
        if self.actionLocKey:
            arguments.append('"action-loc-key":"%s"' % _doublequote(self.actionLocKey))
        if self.locKey:
            arguments.append('"loc-key":"%s"' % _doublequote(self.locKey))
        if self.locArgs:
            arguments.append('"loc-args":[%s]' % ",".join(self.locArgs))
        return ",".join(arguments)
class APNSProperty(object):
    """
    A single custom key/value pair in the APNS payload.

    Accepts string, number, list or tuple values and serializes them
    to a '"name":value' JSON fragment via _build().
    """
    # custom payload key name
    name = None
    # value: string, number, list or tuple
    data = None

    def __init__(self, name = None, data = None):
        if not name or not isinstance(name, str) or len(name) == 0:
            raise APNSValueError, "Name of property argument should be a non-empry string"
        if not isinstance(data, (int, str, list, tuple, float)):
            raise APNSValueError, "Data argument should be string, number, list of tuple"
        self.name = name
        self.data = data

    def _build(self):
        """Serialize this property to a '"name":value' JSON fragment."""
        arguments = []
        name = '"%s":' % self.name

        # numbers are emitted bare
        if isinstance(self.data, (int, float)):
            return "%s%s" % (name, str(self.data))

        # strings are quoted and escaped (Python 2 'unicode' included)
        if isinstance(self.data, str) or isinstance(self.data, unicode):
            return '%s"%s"' % (name, _doublequote(self.data))

        # sequences become JSON arrays; strings inside are quoted
        if isinstance(self.data, (tuple, list)):
            arguments = map(lambda x: if_else(isinstance(x, str), '"%s"' % _doublequote(str(x)), str(x)), self.data)
            return "%s[%s]" % (name, ",".join(arguments))

        # fallback for anything unrecognised: JSON null
        return '%s%s' % (name, NULL)
class APNSNotificationWrapper(object):
    """
    Wraps a list of APNSNotification objects. Use the .append() method
    to add notifications to the list; calling .notify() sends all of
    them to the APNS server in a single write.
    """
    sandbox = True
    apnsHost = 'gateway.push.apple.com'
    apnsSandboxHost = 'gateway.sandbox.push.apple.com'
    apnsPort = 2195
    payloads = None
    connection = None
    debug_ssl = False

    def __init__(self, certificate=None, sandbox=True, debug_ssl=False,
                 force_ssl_command=False):
        self.debug_ssl = debug_ssl
        # Honor the caller's flag; previously this was hard-coded to False,
        # silently ignoring the force_ssl_command argument.
        self.force_ssl_command = force_ssl_command
        self.connection = APNSConnection(certificate=certificate,
                                         force_ssl_command=self.force_ssl_command,
                                         debug=self.debug_ssl)
        self.sandbox = sandbox
        self.payloads = []

    def append(self, payload=None):
        """Append payload to wrapper"""
        if not isinstance(payload, APNSNotification):
            raise APNSTypeError("Unexpected argument type. Argument should be an instance of APNSNotification object")
        self.payloads.append(payload)

    def count(self):
        """Return the number of queued payloads."""
        return len(self.payloads)

    def notify(self):
        """
        Send all queued notifications to APNS:
            1) build every notification into its binary frame
            2) connect to the APNS gateway and write them in one message

        Returns False when there is nothing to send, True otherwise.
        """
        payloads = [o.payload() for o in self.payloads]
        if len(payloads) == 0:
            return False
        # Each payload is already a packed binary frame; the old
        # struct.pack('%ds', p) loop was an identity re-pack, so a plain
        # concatenation is equivalent.
        message = "".join(payloads)
        apnsConnection = self.connection
        # Note: kept as `!= True` to preserve the original's behavior for
        # non-boolean truthy values of self.sandbox.
        if self.sandbox != True:
            apnsHost = self.apnsHost
        else:
            apnsHost = self.apnsSandboxHost
        apnsConnection.connect(apnsHost, self.apnsPort)
        apnsConnection.write(message)
        apnsConnection.close()
        return True
class APNSNotification(object):
    """
    Represents one Apple Push Notification Service message: a device
    token, the "aps" dictionary (alert / badge / sound) and optional
    custom top-level properties. All setter methods return self so
    calls can be chained.
    """
    command = 0
    deviceToken = None
    maxPayloadLength = 256
    deviceTokenLength = 32
    properties = None
    # NOTE: the former class attributes `badge`, `sound` and `alert` were
    # removed — they were dead, immediately shadowed by the methods of the
    # same names defined below.

    def __init__(self):
        """
        Initialize a fresh, empty notification.
        """
        self.properties = []
        self.badgeValue = None
        self.soundValue = None
        self.alertObject = None
        self.deviceToken = None

    def token(self, token):
        """
        Add deviceToken in binary format.
        """
        self.deviceToken = token
        return self

    def tokenBase64(self, encodedToken):
        """
        Add deviceToken as base64 encoded string (not binary)
        """
        self.deviceToken = base64.standard_b64decode(encodedToken)
        return self

    def tokenHex(self, hexToken):
        """
        Add deviceToken as a hex string.
        Strips out whitespace, '<>' and '-' before decoding.
        """
        hexToken = hexToken.strip().strip('<>').replace(' ', '').replace('-', '')
        self.deviceToken = binascii.unhexlify(hexToken)
        return self

    def unbadge(self):
        """Shortcut that sets the badge to 0, clearing it from the app icon.
        """
        self.badge(0)
        return self

    def badge(self, num=None):
        """
        Set the badge number. If the argument is None (the default),
        the badge key is omitted from the payload.
        """
        if num is None:
            self.badgeValue = None
            return self
        if not isinstance(num, int):
            raise APNSValueError("Badge argument must be a number")
        self.badgeValue = num
        return self

    def sound(self, sound='default'):
        """
        Set a custom notification sound (by default 'default').
        Passing None omits the sound key from the payload.
        """
        if sound is None:
            self.soundValue = None
            return self
        self.soundValue = str(sound)
        return self

    def alert(self, alert=None):
        """
        Set the alert. Accepts a str, a unicode string, or an APNSAlert
        object instance.
        """
        if not isinstance(alert, str) and not isinstance(alert, unicode) and not isinstance(alert, APNSAlert):
            raise APNSTypeError("Wrong type of alert argument. Argument should be String, Unicode string or an instance of APNSAlert object")
        self.alertObject = alert
        return self

    def appendProperty(self, *args):
        """
        Add one or more APNSProperty instances to the payload.
        """
        for prop in args:
            if not isinstance(prop, APNSProperty):
                raise APNSTypeError("Wrong type of argument. Argument should be an instance of APNSProperty object")
            self.properties.append(prop)
        return self

    def clearProperties(self):
        """
        Clear the list of custom properties.
        """
        # Reset to an empty list, not None: _build() iterates this
        # attribute and would crash with TypeError on None.
        self.properties = []

    def _build(self):
        """
        Serialize the notification into its JSON payload string.
        Raises APNSPayloadLengthError when the result exceeds the
        256-byte APNS limit.
        """
        keys = []
        apsKeys = []
        if self.soundValue:
            apsKeys.append('"sound":"%s"' % _doublequote(self.soundValue))
        # Compare against None explicitly so an explicit badge of 0
        # (set via unbadge()) is still serialized; a plain truthiness
        # test silently dropped it.
        if self.badgeValue is not None:
            apsKeys.append('"badge":%d' % int(self.badgeValue))
        if self.alertObject is not None:
            # Accept unicode as well as str — alert() allows both, but the
            # old str-only check silently dropped unicode alerts here.
            if isinstance(self.alertObject, (str, unicode)):
                apsKeys.append('"alert":"%s"' % _doublequote(self.alertObject))
            elif isinstance(self.alertObject, APNSAlert):
                apsKeys.append('"alert":{%s}' % self.alertObject._build())
        keys.append('"aps":{%s}' % ",".join(apsKeys))
        # Append custom top-level properties after the "aps" dictionary.
        for prop in self.properties:
            keys.append(prop._build())
        payload = "{%s}" % ",".join(keys)
        if len(payload) > self.maxPayloadLength:
            raise APNSPayloadLengthError("Length of Payload more than %d bytes." % self.maxPayloadLength)
        return payload

    def payload(self):
        """
        Pack the binary APNS frame: command byte, token length + token,
        payload length + payload (network byte order).
        """
        if self.deviceToken is None:
            raise APNSUndefinedDeviceToken("You forget to set deviceToken in your notification.")
        payload = self._build()
        payloadLength = len(payload)
        tokenLength = len(self.deviceToken)
        # (removed unused tokenFormat/payloadFormat locals)
        apnsPackFormat = "!BH" + str(tokenLength) + "sH" + str(payloadLength) + "s"
        return struct.pack(apnsPackFormat,
                           self.command,
                           tokenLength,
                           self.deviceToken,
                           payloadLength,
                           payload)
|
sebcreme/SmartAlarmClock | refs/heads/master | src/actions/DisplayEvent.py | 6 | from bottle import Bottle, route, run, template, static_file, get, jinja2_template as template, post, request, response, redirect
import json
import requests
import runtime
import actions
"""
DisplayEvent
"""
# Placeholder parameter dictionaries for this web action (currently unused).
defaultInputParams = {}
defaultOutputParams = {}
# Simulation routing info (user/project/version/sim_id), filled in by registerAction().
sim_parameters = dict()
# Register actions
def registerAction(user, project, version, sim_id):
    """Record the simulation routing info and register this web action
    with the runtime under the 'DisplayEvent' endpoint."""
    sim_parameters.update(
        user=user,
        project=project,
        version=version,
        sim_id=sim_id,
    )
    runtime.register_webActions(
        user, project, version, sim_id,
        'DisplayEvent', '/home/actions/DisplayEvent/')
def start():
    """Queue the incoming event for display and report success back to
    the craft runtime server."""
    body = request.json
    runtime.eventQueue.put(json.dumps(body['input']['event']))
    success_url = '{}/api/v1/{}/{}/{}/{}/actions/{}/success'.format(
        runtime.CRAFT_RUNTIME_SERVER_URL,
        sim_parameters['user'],
        sim_parameters['project'],
        sim_parameters['version'],
        sim_parameters['sim_id'],
        body['requestId'],
    )
    requests.post(success_url)
    return
def cancel():
    """Notify the craft runtime server that this action was cancelled."""
    cancel_url = '{}/api/v1/{}/{}/{}/{}/actions/{}/cancelation'.format(
        runtime.CRAFT_RUNTIME_SERVER_URL,
        sim_parameters['user'],
        sim_parameters['project'],
        sim_parameters['version'],
        sim_parameters['sim_id'],
        request.json['requestId'],
    )
    requests.post(cancel_url)
    return
|
megemini/DataCastle2017 | refs/heads/master | src/func/kerasnode.py | 1 | # -*- coding: utf-8 -*-
"""
**keras node** module
-------------------
"""
# Public API of this module: thin functional wrappers over the Keras model API.
__all__ = ['get_sequential', 'add_dense', 'add_conv2D', 'add_dropout', 'add_maxPooling2D', 'add_flatten', 'set_compile', 'model_fit']
def get_sequential():
    """Return a new, empty Keras ``Sequential`` model."""
    model = Sequential()
    return model
def add_dense(Keras_model, units, activation, input_dim):
    """Append a fully-connected ``Dense`` layer and return the model."""
    layer = Dense(units=units, activation=activation, input_dim=input_dim)
    Keras_model.add(layer)
    return Keras_model
def add_conv2D(Keras_model, filters, kernel_size, strides, activation, input_shape):
    """Append a 2-D convolution layer and return the model."""
    layer = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                   activation=activation, input_shape=input_shape)
    Keras_model.add(layer)
    return Keras_model
def add_dropout(Keras_model, rate):
    """Append a ``Dropout`` layer with the given drop rate and return the model."""
    layer = Dropout(rate=rate)
    Keras_model.add(layer)
    return Keras_model
def add_maxPooling2D(Keras_model, pool_size):
    """Append a 2-D max-pooling layer and return the model."""
    layer = MaxPooling2D(pool_size=pool_size)
    Keras_model.add(layer)
    return Keras_model
def add_flatten(Keras_model):
    """Append a ``Flatten`` layer and return the model."""
    Keras_model.add(Flatten())
    return Keras_model
def set_compile(Keras_model, optimizer, loss):
    """Compile the model with the given loss and optimizer; return the model."""
    Keras_model.compile(loss=loss, optimizer=optimizer)
    return Keras_model
def model_fit(Keras_model, X, y, epochs, batch_size):
    """Fit the model on DataFrame inputs, cast to float32; return the model."""
    features = X.values.astype(np.float32)
    targets = y.values.astype(np.float32)
    Keras_model.fit(features, targets, batch_size=batch_size, epochs=epochs)
    return Keras_model
|
halvertoluke/edx-platform | refs/heads/default_branch | openedx/core/djangoapps/profile_images/tests/helpers.py | 117 | """
Helper methods for use in profile image tests.
"""
from contextlib import contextmanager
import os
from tempfile import NamedTemporaryFile
from django.core.files.uploadedfile import UploadedFile
from PIL import Image
@contextmanager
def make_image_file(dimensions=(320, 240), extension=".jpeg", force_size=None):
    """
    Yields a named temporary file created with the specified image type and
    options.
    Note the default dimensions are unequal (not a square) ensuring that center-square
    cropping logic will be exercised during tests.
    The temporary file will be closed and deleted automatically upon exiting
    the `with` block.
    """
    image = Image.new('RGB', dimensions, "green")
    image_file = NamedTemporaryFile(suffix=extension)
    try:
        image.save(image_file)
        if force_size is not None:
            # Pad the file with zero bytes until it reaches exactly
            # `force_size` bytes (assumes force_size >= current size —
            # TODO confirm callers never pass a smaller value).
            image_file.seek(0, os.SEEK_END)
            bytes_to_pad = force_size - image_file.tell()
            # write in hunks of 256 bytes
            hunk, byte_ = bytearray([0] * 256), bytearray([0])
            num_hunks, remainder = divmod(bytes_to_pad, 256)
            for _ in xrange(num_hunks):  # xrange: this file is Python 2
                image_file.write(hunk)
            for _ in xrange(remainder):
                image_file.write(byte_)
            image_file.flush()
        # Rewind so the consumer reads from the start of the image data.
        image_file.seek(0)
        yield image_file
    finally:
        # NamedTemporaryFile deletes the file on close.
        image_file.close()
@contextmanager
def make_uploaded_file(content_type, *a, **kw):
    """
    Yield a django ``UploadedFile`` wrapping a generated test image.

    Positional and keyword arguments are forwarded to ``make_image_file``.
    """
    with make_image_file(*a, **kw) as image_file:
        uploaded = UploadedFile(
            image_file,
            content_type=content_type,
            size=os.path.getsize(image_file.name),
        )
        yield uploaded
|
thanhpete/selenium | refs/heads/master | py/selenium/webdriver/opera/webdriver.py | 71 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.chrome.webdriver import WebDriver as ChromiumDriver
from .options import Options
class OperaDriver(ChromiumDriver):
    """Controls the new OperaDriver and allows you to drive the
    Chromium-based Opera browser."""

    def __init__(self, executable_path=None, port=0,
                 opera_options=None, service_args=None,
                 desired_capabilities=None, service_log_path=None):
        """
        Creates a new instance of the operadriver.
        Starts the service and then creates new instance of operadriver.

        :Args:
         - executable_path - path to the executable. If the default is used
                             it assumes the executable is in the $PATH
         - port - port you would like the service to run, if left as 0,
                  a free port will be found.
         - desired_capabilities: Dictionary object with non-browser specific
           capabilities only, such as "proxy" or "loggingPref".
         - opera_options: this takes an instance of Options
        """
        # Fall back to looking up "operadriver" on the $PATH.
        if executable_path is None:
            executable_path = "operadriver"
        ChromiumDriver.__init__(self,
                                executable_path=executable_path,
                                port=port,
                                chrome_options=opera_options,
                                service_args=service_args,
                                desired_capabilities=desired_capabilities,
                                service_log_path=service_log_path)

    def create_options(self):
        """Return a fresh Opera-specific ``Options`` instance."""
        return Options()
class WebDriver(OperaDriver):
    """Backwards-compatible entry point; delegates everything to OperaDriver."""
    class ServiceType:
        # Only the Chromium-based Opera service is supported.
        CHROMIUM = 2
    def __init__(self,
                 desired_capabilities=None,
                 executable_path=None,
                 port=0,
                 service_log_path=None,
                 service_args=None,
                 opera_options=None):
        # Pass all arguments straight through to OperaDriver unchanged.
        OperaDriver.__init__(self, executable_path=executable_path,
                             port=port, opera_options=opera_options,
                             service_args=service_args,
                             desired_capabilities=desired_capabilities,
                             service_log_path=service_log_path)
|
JunhwanPark/TizenRT | refs/heads/artik | external/iotivity/iotivity_1.2-rel/build_common/iotivityconfig/compiler/configuration.py | 64 | # ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
class Configuration:
    """Compiler-specific configuration abstract base class"""

    def __init__(self, context):
        """
        Initialize the Configuration object

        Arguments:
        context -- the scons configure context
        """
        # Guard against direct instantiation; only concrete subclasses
        # (which implement the _c99_*/_cxx11_* hooks) may be created.
        if type(self) is Configuration:
            raise TypeError('abstract class cannot be instantiated')
        self._context = context  # scons configure context
        self._env = context.env  # scons environment

    def check_c99_flags(self):
        """
        Check if a command line flag is required to enable C99 support.

        Returns 1 if no flag is required, 0 if no flag was found, and
        the actual flag if one was found. CFLAGS will be updated with
        the appropriate C99 flag, accordingly.
        """
        return self._check_flags(self._c99_flags(),
                                 self._c99_test_program(),
                                 '.c',
                                 'CFLAGS')

    def check_cxx11_flags(self):
        """
        Check if a command line flag is required to enable C++11 support.

        Returns 1 if no flag is required, 0 if no flag was found, and
        the actual flag if one was found. CXXFLAGS will be updated with
        the appropriate C++11 flag, accordingly.
        """
        return self._check_flags(self._cxx11_flags(),
                                 self._cxx11_test_program(),
                                 '.cpp',
                                 'CXXFLAGS')

    def has_pthreads_support(self):
        """
        Check if PThreads are supported by this system.

        Returns 1 if this system DOES support pthreads, 0 otherwise.
        """
        return self._context.TryCompile(self._pthreads_test_program(), '.c')

    def _check_flags(self,
                     test_flags,
                     test_program,
                     test_extension,
                     flags_key):
        """
        Check if a flag is required to build the given test program.

        Arguments:
        test_flags     -- list of flags that may be needed to build
                          test_program
        test_program   -- program used to determine if one of the given
                          flags is required for a successful build
        test_extension -- file extension associated with the test
                          program, e.g. '.cpp' for C++ and '.c' for C
        flags_key      -- key used to retrieve compiler flags that may
                          be updated by this check from the SCons
                          environment
        """
        # First check whether the program builds with no extra flags.
        ret = self._context.TryCompile(test_program,
                                       test_extension)
        # Fixed: the original used `ret is 0`, an identity comparison
        # against an int literal whose result is CPython-implementation
        # dependent; use equality instead.
        if ret == 0:
            # Try flags known to enable the compiler features needed by
            # the test program.
            last_flags = self._env[flags_key]
            for flag in test_flags:
                self._env.Append(**{flags_key: flag})
                ret = self._context.TryCompile(test_program,
                                               test_extension)
                if ret:
                    # Found a flag! Leave it in the environment.
                    return flag
                else:
                    # Restore original compiler flags for the next
                    # flag test.
                    self._env.Replace(**{flags_key: last_flags})
        return ret

    def _c99_test_program(self):
        """
        Return the test program used when checking for basic C99 support.

        Subclasses should implement this template method or use the
        default test program found in the DefaultConfiguration class
        through composition.
        """
        raise NotImplementedError('unimplemented method')

    def _c99_flags(self):
        """
        Get the list of flags that could potentially enable C99 support.

        Subclasses should implement this template method if flags are
        needed to enable C99 support.
        """
        raise NotImplementedError('unimplemented method')

    def _cxx11_test_program(self):
        """
        Return the test program used when checking for basic C++11 support.

        Subclasses should implement this template method or use the
        default test program found in the DefaultConfiguration class
        through composition.
        """
        raise NotImplementedError('unimplemented method')

    def _cxx11_flags(self):
        """
        Get the list of flags that could potentially enable C++11 support.

        Subclasses should implement this template method if flags are
        needed to enable C++11 support.
        """
        raise NotImplementedError('unimplemented method')

    def _pthreads_test_program(self):
        """
        Return a test program used when checking for PThreads support.
        """
        return """
#include <unistd.h>
#include <pthread.h>
int main()
{
    #ifndef _POSIX_THREADS
    # error POSIX Threads support not available
    #endif
    return 0;
}
"""
|
hezuoguang/ZGVL | refs/heads/master | WLServer/site-packages/django/conf/__init__.py | 106 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import logging
import os
import sys
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
from django.utils import importlib
from django.utils.module_loading import import_by_path
from django.utils import six
# Name of the environment variable that points Django at the settings module to import.
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
    """
    A lazy proxy for either global Django settings or a custom settings object.
    The user can manually configure settings prior to using them. Otherwise,
    Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
    """
    def _setup(self, name=None):
        """
        Load the settings module pointed to by the environment variable. This
        is used the first time we need any settings at all, if the user has not
        previously configured the settings manually.
        """
        try:
            settings_module = os.environ[ENVIRONMENT_VARIABLE]
            if not settings_module: # If it's set but is an empty string.
                raise KeyError
        except KeyError:
            # Include the attribute that triggered setup, if any, so the
            # error points at what the caller was trying to read.
            desc = ("setting %s" % name) if name else "settings"
            raise ImproperlyConfigured(
                "Requested %s, but settings are not configured. "
                "You must either define the environment variable %s "
                "or call settings.configure() before accessing settings."
                % (desc, ENVIRONMENT_VARIABLE))
        # Order matters: _wrapped must be assigned before logging setup,
        # which reads LOGGING_CONFIG/LOGGING through this proxy.
        self._wrapped = Settings(settings_module)
        self._configure_logging()
    def __getattr__(self, name):
        # `empty` is the LazyObject sentinel meaning "not yet initialized".
        if self._wrapped is empty:
            self._setup(name)
        return getattr(self._wrapped, name)
    def _configure_logging(self):
        """
        Setup logging from LOGGING_CONFIG and LOGGING settings.
        """
        if not sys.warnoptions:
            try:
                # Route warnings through python logging
                logging.captureWarnings(True)
                # Allow DeprecationWarnings through the warnings filters
                warnings.simplefilter("default", DeprecationWarning)
            except AttributeError:
                # No captureWarnings on Python 2.6, DeprecationWarnings are on anyway
                pass
        if self.LOGGING_CONFIG:
            from django.utils.log import DEFAULT_LOGGING
            # First find the logging configuration function ...
            logging_config_func = import_by_path(self.LOGGING_CONFIG)
            logging_config_func(DEFAULT_LOGGING)
            # ... then invoke it with the logging settings
            if self.LOGGING:
                logging_config_func(self.LOGGING)
    def configure(self, default_settings=global_settings, **options):
        """
        Called to manually configure the settings. The 'default_settings'
        parameter sets where to retrieve any unspecified values from (its
        argument must support attribute access (__getattr__)).
        """
        if self._wrapped is not empty:
            raise RuntimeError('Settings already configured.')
        holder = UserSettingsHolder(default_settings)
        for name, value in options.items():
            setattr(holder, name, value)
        self._wrapped = holder
        self._configure_logging()
    @property
    def configured(self):
        """
        Returns True if the settings have already been configured.
        """
        return self._wrapped is not empty
class BaseSettings(object):
    """
    Common logic for settings whether set by a module or by the user.
    Validates a few well-known setting names on assignment.
    """
    def __setattr__(self, name, value):
        url_settings = ("MEDIA_URL", "STATIC_URL")
        if name in url_settings and value and not value.endswith('/'):
            raise ImproperlyConfigured("If set, %s must end with a slash" % name)
        elif name == "ALLOWED_INCLUDE_ROOTS" and isinstance(value, six.string_types):
            raise ValueError("The ALLOWED_INCLUDE_ROOTS setting must be set "
                "to a tuple, not a string.")
        object.__setattr__(self, name, value)
class Settings(BaseSettings):
    # Settings loaded from a named module: global defaults first, then
    # overridden by the user's DJANGO_SETTINGS_MODULE module.
    def __init__(self, settings_module):
        # update this dict from global settings (but only for ALL_CAPS settings)
        for setting in dir(global_settings):
            if setting == setting.upper():
                setattr(self, setting, getattr(global_settings, setting))
        # store the settings module in case someone later cares
        self.SETTINGS_MODULE = settings_module
        try:
            mod = importlib.import_module(self.SETTINGS_MODULE)
        except ImportError as e:
            raise ImportError(
                "Could not import settings '%s' (Is it on sys.path? Is there an import error in the settings file?): %s"
                % (self.SETTINGS_MODULE, e)
            )
        # Settings that should be converted into tuples if they're mistakenly entered
        # as strings.
        tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
        for setting in dir(mod):
            if setting == setting.upper():
                setting_value = getattr(mod, setting)
                if setting in tuple_settings and \
                        isinstance(setting_value, six.string_types):
                    warnings.warn("The %s setting must be a tuple. Please fix your "
                                  "settings, as auto-correction is now deprecated." % setting,
                        DeprecationWarning, stacklevel=2)
                    setting_value = (setting_value,) # In case the user forgot the comma.
                setattr(self, setting, setting_value)
        if not self.SECRET_KEY:
            raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
        if hasattr(time, 'tzset') and self.TIME_ZONE:
            # When we can, attempt to validate the timezone. If we can't find
            # this file, no check happens and it's harmless.
            zoneinfo_root = '/usr/share/zoneinfo'
            if (os.path.exists(zoneinfo_root) and not
                    os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
                raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            os.environ['TZ'] = self.TIME_ZONE
            time.tzset()
class UserSettingsHolder(BaseSettings):
    """
    Holder for user configured settings.
    """
    # SETTINGS_MODULE doesn't make much sense in the manually configured
    # (standalone) case.
    SETTINGS_MODULE = None
    def __init__(self, default_settings):
        """
        Requests for configuration variables not in this class are satisfied
        from the module specified in default_settings (if possible).
        """
        # Write _deleted straight into __dict__ to bypass our own
        # __setattr__, which reads _deleted; this must happen before any
        # normal attribute assignment (ordering is load-bearing).
        self.__dict__['_deleted'] = set()
        self.default_settings = default_settings
    def __getattr__(self, name):
        # Explicitly deleted names must not fall through to the defaults.
        if name in self._deleted:
            raise AttributeError
        return getattr(self.default_settings, name)
    def __setattr__(self, name, value):
        # Re-setting a name cancels any earlier deletion of it.
        self._deleted.discard(name)
        return super(UserSettingsHolder, self).__setattr__(name, value)
    def __delattr__(self, name):
        # Record the deletion so __getattr__ stops delegating this name.
        self._deleted.add(name)
        return super(UserSettingsHolder, self).__delattr__(name)
    def __dir__(self):
        return list(self.__dict__) + dir(self.default_settings)
# Module-level singleton; all of Django reads configuration through this proxy.
settings = LazySettings()
|
kalahbrown/HueBigSQL | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/auth/handlers/modwsgi.py | 119 | from django.contrib import auth
from django import db
from django.utils.encoding import force_bytes
def check_password(environ, username, password):
    """
    Authenticates against Django's auth database.

    mod_wsgi docs specify None, True, False as return value depending
    on whether the user exists and authenticates.
    """
    UserModel = auth.get_user_model()
    # db connection state is managed similarly to the wsgi handler,
    # as mod_wsgi may call these functions outside of a request/response cycle
    db.reset_queries()
    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return None
        # Inactive users are treated the same as unknown users.
        return user.check_password(password) if user.is_active else None
    finally:
        db.close_old_connections()
def groups_for_user(environ, username):
    """
    Authorizes a user based on groups.

    Returns the (byte-string) names of the user's groups, or an empty
    list for unknown or inactive users.
    """
    UserModel = auth.get_user_model()
    # Connections may be stale when mod_wsgi invokes us outside a request.
    db.reset_queries()
    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return []
        if not user.is_active:
            return []
        return [force_bytes(group.name) for group in user.groups.all()]
    finally:
        db.close_old_connections()
|
dana-i2cat/felix | refs/heads/master | vt_manager/src/python/vt_manager/controller/users/forms.py | 5 | from django import forms
from django.contrib.auth.models import User
class UserForm(forms.ModelForm):
    '''
    A form for editing Users
    '''
    class Meta:
        model = User
        # Hide credential, permission and bookkeeping fields so only basic
        # profile data can be edited through this form.
        exclude = ('username', 'password', 'last_login', 'date_joined', 'groups', 'user_permissions',
                'is_staff', 'is_superuser', 'is_active')
|
TimBuckley/effective_django | refs/heads/master | tests/model_regress/tests.py | 13 | from __future__ import unicode_literals
import datetime
from operator import attrgetter
import sys
import unittest
from django.core.exceptions import ValidationError
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.timezone import get_fixed_timezone
from django.db import connection, router
from django.db.models.sql import InsertQuery
from .models import (Worker, Article, Party, Event, Department,
BrokenUnicodeMethod, NonAutoPK, Model1, Model2, Model3)
class ModelTests(TestCase):
    """Assorted model-layer regression tests (each method cites its ticket)."""
    # The bug is that the following queries would raise:
    # "TypeError: Related Field has invalid lookup: gte"
    def test_related_gte_lookup(self):
        """
        Regression test for #10153: foreign key __gte lookups.
        """
        Worker.objects.filter(department__gte=0)
    def test_related_lte_lookup(self):
        """
        Regression test for #10153: foreign key __lte lookups.
        """
        Worker.objects.filter(department__lte=0)
    def test_sql_insert_compiler_return_id_attribute(self):
        """
        Regression test for #14019: SQLInsertCompiler.as_sql() failure
        """
        db = router.db_for_write(Party)
        query = InsertQuery(Party)
        query.insert_values([Party._meta.fields[0]], [], raw=False)
        # this line will raise an AttributeError without the accompanying fix
        query.get_compiler(using=db).as_sql()
    def test_empty_choice(self):
        # NOTE: Part of the regression test here is merely parsing the model
        # declaration. The verbose_name, in particular, did not always work.
        a = Article.objects.create(
            headline="Look at me!", pub_date=datetime.datetime.now()
        )
        # An empty choice field should return None for the display name.
        self.assertIs(a.get_status_display(), None)
        # Empty strings should be returned as Unicode
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(a.misc_data, '')
        self.assertIs(type(a.misc_data), six.text_type)
    def test_long_textfield(self):
        # TextFields can hold more than 4000 characters (this was broken in
        # Oracle).
        a = Article.objects.create(
            headline="Really, really big",
            pub_date=datetime.datetime.now(),
            article_text="ABCDE" * 1000
        )
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(len(a.article_text), 5000)
    def test_date_lookup(self):
        # Regression test for #659
        Party.objects.create(when=datetime.datetime(1999, 12, 31))
        Party.objects.create(when=datetime.datetime(1998, 12, 31))
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        Party.objects.create(when=datetime.datetime(1, 3, 3))
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=2), []
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=1), [
                datetime.date(1999, 1, 1)
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=12), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1998), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #8510
        self.assertQuerysetEqual(
            Party.objects.filter(when__day="31"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month="12"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year="1998"), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #18969
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1), [
                datetime.date(1, 3, 3),
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year='1'), [
                datetime.date(1, 3, 3),
            ],
            attrgetter("when")
        )
    # Class-level conditional: mark the previous test as an expected failure
    # on the affected Python/MySQL combination.
    if (3,) <= sys.version_info < (3, 3) and connection.vendor == 'mysql':
        # In Python < 3.3, datetime.strftime raises an exception for years
        # below 1000, and existing MySQL DB-API drivers hit this problem.
        test_date_lookup = unittest.expectedFailure(test_date_lookup)
    def test_date_filter_null(self):
        # Date filtering was failing with NULL date values in SQLite
        # (regression test for #3501, amongst other things).
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        Party.objects.create()
        p = Party.objects.filter(when__month=1)[0]
        self.assertEqual(p.when, datetime.date(1999, 1, 1))
        self.assertQuerysetEqual(
            Party.objects.filter(pk=p.pk).dates("when", "month"), [
                1
            ],
            attrgetter("month")
        )
    def test_get_next_prev_by_field(self):
        # Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
        # when we have usecs values stored on the database
        #
        # It crashed after the Field.get_db_prep_* refactor, because on most
        # backends DateTimeFields supports usecs, but DateTimeField.to_python
        # didn't recognize them. (Note that
        # Model._get_next_or_previous_by_FIELD coerces values to strings)
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
        e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
        self.assertEqual(
            e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
        )
        self.assertEqual(
            e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
        )
    def test_primary_key_foreign_key_types(self):
        # Check Department and Worker (non-default PK type)
        d = Department.objects.create(id=10, name="IT")
        w = Worker.objects.create(department=d, name="Full-time")
        self.assertEqual(six.text_type(w), "Full-time")
    def test_broken_unicode(self):
        # Models with broken unicode methods should still have a printable repr
        b = BrokenUnicodeMethod.objects.create(name="Jerry")
        self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")
    @skipUnlessDBFeature("supports_timezones")
    def test_timezones(self):
        # Saving an updating with timezone-aware datetime Python objects.
        # Regression test for #10443.
        # The idea is that all these creations and saving should work without
        # crashing. It's not rocket science.
        dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
        dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
        obj = Article.objects.create(
            headline="A headline", pub_date=dt1, article_text="foo"
        )
        obj.pub_date = dt2
        obj.save()
        self.assertEqual(
            Article.objects.filter(headline="A headline").update(pub_date=dt1),
            1
        )
    def test_chained_fks(self):
        """
        Regression for #18432: Chained foreign keys with to_field produce incorrect query
        """
        m1 = Model1.objects.create(pkey=1000)
        m2 = Model2.objects.create(model1=m1)
        m3 = Model3.objects.create(model2=m2)
        # this is the actual test for #18432
        m3 = Model3.objects.get(model2=1000)
        m3.model2
class ModelValidationTest(TestCase):
    """Regression test for model-level unique validation."""

    def test_pk_validation(self):
        NonAutoPK.objects.create(name="one")
        duplicate = NonAutoPK(name="one")
        # A second instance with the same unique value must fail validation.
        self.assertRaises(ValidationError, duplicate.validate_unique)
class EvaluateMethodTest(TestCase):
    """
    Regression test for #13640: cannot filter by objects with 'evaluate' attr
    """
    def test_model_with_evaluate_method(self):
        """
        Ensures that you can filter by objects that have an 'evaluate' attr
        """
        department = Department.objects.create(pk=1, name='abc')
        # Attach the attribute that used to confuse the ORM's query code.
        department.evaluate = 'abc'
        Worker.objects.filter(department=department)
|
alphafoobar/intellij-community | refs/heads/master | python/testData/refactoring/move/baseClass/before/src/a.py | 83 | class B(object):
def __init__(self):
pass
class C(B):
def __init__(self):
super(C, self).__init__() |
saurabh6790/med_lib_test | refs/heads/master | webnotes/db.py | 21 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import MySQLdb
import warnings
import webnotes
from webnotes import conf
import datetime
class Database:
"""
Open a database connection with the given parmeters, if use_default is True, use the
login details from `conf.py`. This is called by the request handler and is accessible using
the `conn` global variable. the `sql` method is also global to run queries
"""
def __init__(self, host=None, user=None, password=None, ac_name=None, use_default = 0):
self.host = host or conf.db_host or 'localhost'
self.user = user or conf.db_name
if ac_name:
self.user = self.get_db_login(ac_name) or conf.db_name
if use_default:
self.user = conf.db_name
self.transaction_writes = 0
self.auto_commit_on_many_writes = 0
self.password = password or webnotes.get_db_password(self.user)
self.connect()
if self.user != 'root':
self.use(self.user)
def get_db_login(self, ac_name):
return ac_name
def connect(self):
"""
Connect to a database
"""
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
self._conn = MySQLdb.connect(user=self.user, host=self.host, passwd=self.password,
use_unicode=True, charset='utf8')
self._conn.converter[246]=float
self._cursor = self._conn.cursor()
webnotes.local.rollback_observers = []
def use(self, db_name):
"""
`USE` db_name
"""
self._conn.select_db(db_name)
self.cur_db_name = db_name
def validate_query(self, q):
cmd = q.strip().lower().split()[0]
if cmd in ['alter', 'drop', 'truncate'] and webnotes.user.name != 'Administrator':
webnotes.msgprint('Not allowed to execute query')
raise Exception
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None):
"""
* Execute a `query`, with given `values`
* returns as a dictionary if as_dict = 1
* returns as a list of lists (with cleaned up dates) if as_list = 1
"""
# in transaction validations
self.check_transaction_status(query)
# autocommit
if auto_commit: self.commit()
# execute
try:
if values!=():
if isinstance(values, dict):
values = dict(values)
if debug:
try:
self.explain_query(query, values)
webnotes.errprint(query % values)
except TypeError:
webnotes.errprint([query, values])
if (conf.get("logging") or False)==2:
webnotes.log("<<<< query")
webnotes.log(query)
webnotes.log("with values:")
webnotes.log(values)
webnotes.log(">>>>")
self._cursor.execute(query, values)
else:
if debug:
self.explain_query(query)
webnotes.errprint(query)
if (conf.get("logging") or False)==2:
webnotes.log("<<<< query")
webnotes.log(query)
webnotes.log(">>>>")
self._cursor.execute(query)
except Exception, e:
# ignore data definition errors
if ignore_ddl and e.args[0] in (1146,1054,1091):
pass
else:
raise
if auto_commit: self.commit()
# scrub output if required
if as_dict:
ret = self.fetch_as_dict(formatted, as_utf8)
if update:
for r in ret:
r.update(update)
return ret
elif as_list:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
elif as_utf8:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
else:
return self._cursor.fetchall()
def explain_query(self, query, values=None):
try:
webnotes.errprint("--- query explain ---")
if values is None:
self._cursor.execute("explain " + query)
else:
self._cursor.execute("explain " + query, values)
import json
webnotes.errprint(json.dumps(self.fetch_as_dict(), indent=1))
webnotes.errprint("--- query explain end ---")
except:
webnotes.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
return [r[0] for r in self.sql(query, values, debug=debug)]
def sql_ddl(self, query, values=()):
self.commit()
self.sql(query)
def check_transaction_status(self, query):
if self.transaction_writes and query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin"]:
raise Exception, 'This statement can cause implicit commit'
if query and query.strip().lower() in ('commit', 'rollback'):
self.transaction_writes = 0
if query[:6].lower() in ['update', 'insert']:
self.transaction_writes += 1
if not webnotes.flags.in_test and self.transaction_writes > 10000:
if self.auto_commit_on_many_writes:
webnotes.conn.commit()
webnotes.conn.begin()
else:
webnotes.msgprint('A very long query was encountered. If you are trying to import data, please do so using smaller files')
raise Exception, 'Bad Query!!! Too many writes'
def fetch_as_dict(self, formatted=0, as_utf8=0):
result = self._cursor.fetchall()
ret = []
needs_formatting = self.needs_formatting(result, formatted)
for r in result:
row_dict = webnotes._dict({})
for i in range(len(r)):
if needs_formatting:
val = self.convert_to_simple_type(r[i], formatted)
else:
val = r[i]
if as_utf8 and type(val) is unicode:
val = val.encode('utf-8')
row_dict[self._cursor.description[i][0]] = val
ret.append(row_dict)
return ret
def needs_formatting(self, result, formatted):
if result and result[0]:
for v in result[0]:
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, long)):
return True
if formatted and isinstance(v, (int, float)):
return True
return False
def get_description(self):
return self._cursor.description
def convert_to_simple_type(self, v, formatted=0):
from webnotes.utils import formatdate, fmt_money
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, long)):
if isinstance(v, datetime.date):
v = unicode(v)
if formatted:
v = formatdate(v)
# time
elif isinstance(v, (datetime.timedelta, datetime.datetime)):
v = unicode(v)
# long
elif isinstance(v, long):
v=int(v)
# convert to strings... (if formatted)
if formatted:
if isinstance(v, float):
v=fmt_money(v)
elif isinstance(v, int):
v = unicode(v)
return v
def convert_to_lists(self, res, formatted=0, as_utf8=0):
nres = []
needs_formatting = self.needs_formatting(res, formatted)
for r in res:
nr = []
for c in r:
if needs_formatting:
val = self.convert_to_simple_type(c, formatted)
else:
val = c
if as_utf8 and type(val) is unicode:
val = val.encode('utf-8')
nr.append(val)
nres.append(nr)
return nres
def convert_to_utf8(self, res, formatted=0):
nres = []
for r in res:
nr = []
for c in r:
if type(c) is unicode:
c = c.encode('utf-8')
nr.append(self.convert_to_simple_type(c, formatted))
nres.append(nr)
return nres
def build_conditions(self, filters):
def _build_condition(key):
"""
filter's key is passed by map function
build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
_operator = "="
value = filters.get(key)
if isinstance(value, (list, tuple)):
_operator = value[0]
filters[key] = value[1]
if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like"]:
_operator = "="
if "[" in key:
split_key = key.split("[")
return "ifnull(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
+ _operator + " %(" + key + ")s"
else:
return "`" + key + "` " + _operator + " %(" + key + ")s"
if isinstance(filters, basestring):
filters = { "name": filters }
conditions = map(_build_condition, filters)
return " and ".join(conditions), filters
def get(self, doctype, filters=None, as_dict=True):
return self.get_value(doctype, filters, "*", as_dict=as_dict)
def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False, debug=False):
"""Get a single / multiple value from a record.
For Single DocType, let filters be = None"""
ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug)
return ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) if ret else None
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False, debug=False):
if isinstance(filters, list):
return self.get_value_for_many_names(doctype, filters, fieldname, debug=debug)
fields = fieldname
if fieldname!="*":
if isinstance(fieldname, basestring):
fields = [fieldname]
else:
fields = fieldname
if (filters is not None) and (filters!=doctype or doctype=="DocType"):
try:
return self.get_values_from_table(fields, filters, doctype, as_dict, debug)
except Exception, e:
if ignore and e.args[0] in (1146, 1054):
# table or column not found, return None
return None
elif (not ignore) and e.args[0]==1146:
# table not found, look in singles
pass
else:
raise
return self.get_values_from_single(fields, filters, doctype, as_dict, debug)
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False):
if fields=="*" or isinstance(filters, dict):
r = self.sql("""select field, value from tabSingles where doctype=%s""", doctype)
# check if single doc matches with filters
values = webnotes._dict(r)
if isinstance(filters, dict):
for key, value in filters.items():
if values.get(key) != value:
return []
if as_dict:
return values and [values] or []
if isinstance(fields, list):
return [map(lambda d: values.get(d), fields)]
else:
r = self.sql("""select field, value
from tabSingles where field in (%s) and doctype=%s""" \
% (', '.join(['%s'] * len(fields)), '%s'),
tuple(fields) + (doctype,), as_dict=False, debug=debug)
if as_dict:
return r and [webnotes._dict(r)] or []
else:
return r and [[i[1] for i in r]] or []
def get_values_from_table(self, fields, filters, doctype, as_dict, debug):
fl = []
if isinstance(fields, (list, tuple)):
for f in fields:
if "(" in f: # function
fl.append(f)
else:
fl.append("`" + f + "`")
fl = ", ".join(fields)
else:
fl = fields
if fields=="*":
as_dict = True
conditions, filters = self.build_conditions(filters)
r = self.sql("select %s from `tab%s` where %s" % (fl, doctype,
conditions), filters, as_dict=as_dict, debug=debug)
return r
def get_value_for_many_names(self, doctype, names, field, debug=False):
names = filter(None, names)
if names:
return dict(self.sql("select name, `%s` from `tab%s` where name in (%s)" \
% (field, doctype, ", ".join(["%s"]*len(names))), names, debug=debug))
else:
return {}
def set_value(self, dt, dn, field, val, modified=None, modified_by=None):
from webnotes.utils import now
if dn and dt!=dn:
self.sql("""update `tab%s` set `%s`=%s, modified=%s, modified_by=%s
where name=%s""" % (dt, field, "%s", "%s", "%s", "%s"),
(val, modified or now(), modified_by or webnotes.session["user"], dn))
else:
if self.sql("select value from tabSingles where field=%s and doctype=%s", (field, dt)):
self.sql("""update tabSingles set value=%s where field=%s and doctype=%s""",
(val, field, dt))
else:
self.sql("""insert into tabSingles(doctype, field, value)
values (%s, %s, %s)""", (dt, field, val, ))
if field!="modified":
self.set_value(dt, dn, "modified", modified or now())
def set_in_doc(self, doc, field, val):
self.set(doc, field, val)
def set(self, doc, field, val):
from webnotes.utils import now
doc.modified = now()
doc.modified_by = webnotes.session["user"]
self.set_value(doc.doctype, doc.name, field, val, doc.modified, doc.modified_by)
doc.fields[field] = val
def touch(self, doctype, docname):
from webnotes.utils import now
webnotes.conn.sql("""update `tab{doctype}` set `modified`=%s
where name=%s""".format(doctype=doctype), (now(), docname))
def set_global(self, key, val, user='__global'):
self.set_default(key, val, user)
def get_global(self, key, user='__global'):
return self.get_default(key, user)
def set_default(self, key, val, parent="Control Panel"):
"""set control panel default (tabDefaultVal)"""
import webnotes.defaults
webnotes.defaults.set_default(key, val, parent)
def add_default(self, key, val, parent="Control Panel"):
import webnotes.defaults
webnotes.defaults.add_default(key, val, parent)
def get_default(self, key, parent="Control Panel"):
"""get default value"""
import webnotes.defaults
d = webnotes.defaults.get_defaults(parent).get(key)
return isinstance(d, list) and d[0] or d
def get_defaults_as_list(self, key, parent="Control Panel"):
import webnotes.defaults
d = webnotes.defaults.get_default(key, parent)
return isinstance(d, basestring) and [d] or d
def get_defaults(self, key=None, parent="Control Panel"):
"""get all defaults"""
import webnotes.defaults
if key:
return webnotes.defaults.get_defaults(parent).get(key)
else:
return webnotes.defaults.get_defaults(parent)
def begin(self):
return # not required
def commit(self):
self.sql("commit")
webnotes.local.rollback_observers = []
def rollback(self):
self.sql("rollback")
for obj in webnotes.local.rollback_observers:
if hasattr(obj, "on_rollback"):
obj.on_rollback()
webnotes.local.rollback_observers = []
def field_exists(self, dt, fn):
return self.sql("select name from tabDocField where fieldname=%s and parent=%s", (dt, fn))
def table_exists(self, tablename):
return tablename in [d[0] for d in self.sql("show tables")]
def exists(self, dt, dn=None):
if isinstance(dt, basestring):
if dt==dn:
return True # single always exists (!)
try:
return self.sql('select name from `tab%s` where name=%s' % (dt, '%s'), dn)
except:
return None
elif isinstance(dt, dict) and dt.get('doctype'):
try:
conditions = []
for d in dt:
if d == 'doctype': continue
conditions.append('`%s` = "%s"' % (d, dt[d].replace('"', '\"')))
return self.sql('select name from `tab%s` where %s' % \
(dt['doctype'], " and ".join(conditions)))
except:
return None
def count(self, dt, filters=None, debug=False):
if filters:
conditions, filters = self.build_conditions(filters)
return webnotes.conn.sql("""select count(*)
from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
else:
return webnotes.conn.sql("""select count(*)
from `tab%s`""" % (dt,))[0][0]
def get_table_columns(self, doctype):
return [r[0] for r in self.sql("DESC `tab%s`" % doctype)]
def close(self):
if self._conn:
self._cursor.close()
self._conn.close()
self._cursor = None
self._conn = None
|
veltri/DLV2 | refs/heads/master | tests/parser/duplicates.4.test.py | 1 | input = """
b :- not a, not x.
a :- not b, not y.
x :- not b.
y :- not a.
"""
output = """
b :- not a, not x.
a :- not b, not y.
x :- not b.
y :- not a.
"""
|
syci/reporting-engine | refs/heads/8.0 | __unported__/base_report_assembler/assembled_report.py | 13 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AssembledReport(orm.Model):
_name = 'assembled.report'
_order = 'sequence'
_columns = {
'report_id': fields.many2one(
'ir.actions.report.xml', 'Report',
domain="[('model', '=', model),"
"('report_type', '!=', 'assemblage')]", required=True),
'model': fields.char('Object model'),
'sequence': fields.integer('Sequence', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'sequence': 1,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(
cr, uid, 'assembled.report', context=c)
}
|
ddrmanxbxfr/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_endtoend.py | 449 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""End-to-end tests for pywebsocket. Tests standalone.py by default. You
can also test mod_pywebsocket hosted on an Apache server by setting
_use_external_server to True and modifying _external_server_port to point to
the port on which the Apache server is running.
"""
import logging
import os
import signal
import socket
import subprocess
import sys
import time
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import client_for_testing
from test import mux_client_for_testing
# Special message that tells the echo server to start closing handshake
_GOODBYE_MESSAGE = 'Goodbye'
_SERVER_WARMUP_IN_SEC = 0.2
# If you want to use external server to run end to end tests, set following
# parameters correctly.
_use_external_server = False
_external_server_port = 0
# Test body functions
def _echo_check_procedure(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message('helloworld')
client.assert_receive('helloworld')
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_binary(client):
client.connect()
client.send_message('binary', binary=True)
client.assert_receive('binary', binary=True)
client.send_message('\x00\x80\xfe\xff\x00\x80', binary=True)
client.assert_receive('\x00\x80\xfe\xff\x00\x80', binary=True)
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_goodbye(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message(_GOODBYE_MESSAGE)
client.assert_receive(_GOODBYE_MESSAGE)
client.assert_receive_close()
client.send_close()
client.assert_connection_closed()
def _echo_check_procedure_with_code_and_reason(client, code, reason):
client.connect()
client.send_close(code, reason)
client.assert_receive_close(code, reason)
client.assert_connection_closed()
def _unmasked_frame_check_procedure(client):
client.connect()
client.send_message('test', mask=False)
client.assert_receive_close(client_for_testing.STATUS_PROTOCOL_ERROR, '')
client.assert_connection_closed()
def _mux_echo_check_procedure(mux_client):
mux_client.connect()
mux_client.send_flow_control(1, 1024)
logical_channel_options = client_for_testing.ClientOptions()
logical_channel_options.server_host = 'localhost'
logical_channel_options.server_port = 80
logical_channel_options.origin = 'http://localhost'
logical_channel_options.resource = '/echo'
mux_client.add_channel(2, logical_channel_options)
mux_client.send_flow_control(2, 1024)
mux_client.send_message(2, 'test')
mux_client.assert_receive(2, 'test')
mux_client.add_channel(3, logical_channel_options)
mux_client.send_flow_control(3, 1024)
mux_client.send_message(2, 'hello')
mux_client.send_message(3, 'world')
mux_client.assert_receive(2, 'hello')
mux_client.assert_receive(3, 'world')
# Don't send close message on channel id 1 so that server-initiated
# closing handshake won't occur.
mux_client.send_close(2)
mux_client.send_close(3)
mux_client.assert_receive_close(2)
mux_client.assert_receive_close(3)
mux_client.send_physical_connection_close()
mux_client.assert_physical_connection_receive_close()
class EndToEndTestBase(unittest.TestCase):
"""Base class for end-to-end tests that launch pywebsocket standalone
server as a separate process, connect to it using the client_for_testing
module, and check if the server behaves correctly by exchanging opening
handshake and frames over a TCP connection.
"""
def setUp(self):
self.server_stderr = None
self.top_dir = os.path.join(os.path.split(__file__)[0], '..')
os.putenv('PYTHONPATH', os.path.pathsep.join(sys.path))
self.standalone_command = os.path.join(
self.top_dir, 'mod_pywebsocket', 'standalone.py')
self.document_root = os.path.join(self.top_dir, 'example')
s = socket.socket()
s.bind(('localhost', 0))
(_, self.test_port) = s.getsockname()
s.close()
self._options = client_for_testing.ClientOptions()
self._options.server_host = 'localhost'
self._options.origin = 'http://localhost'
self._options.resource = '/echo'
# TODO(toyoshim): Eliminate launching a standalone server on using
# external server.
if _use_external_server:
self._options.server_port = _external_server_port
else:
self._options.server_port = self.test_port
# TODO(tyoshino): Use tearDown to kill the server.
def _run_python_command(self, commandline, stdout=None, stderr=None):
return subprocess.Popen([sys.executable] + commandline, close_fds=True,
stdout=stdout, stderr=stderr)
def _run_server(self):
args = [self.standalone_command,
'-H', 'localhost',
'-V', 'localhost',
'-p', str(self.test_port),
'-P', str(self.test_port),
'-d', self.document_root]
# Inherit the level set to the root logger by test runner.
root_logger = logging.getLogger()
log_level = root_logger.getEffectiveLevel()
if log_level != logging.NOTSET:
args.append('--log-level')
args.append(logging.getLevelName(log_level).lower())
return self._run_python_command(args,
stderr=self.server_stderr)
def _kill_process(self, pid):
if sys.platform in ('win32', 'cygwin'):
subprocess.call(
('taskkill.exe', '/f', '/pid', str(pid)), close_fds=True)
else:
os.kill(pid, signal.SIGKILL)
class EndToEndHyBiTest(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test_with_client_options(self, test_function, options):
server = self._run_server()
try:
# TODO(tyoshino): add some logic to poll the server until it
# becomes ready
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_test(self, test_function):
self._run_test_with_client_options(test_function, self._options)
def _run_deflate_frame_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.enable_deflate_frame()
client = client_for_testing.create_client(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_permessage_deflate_test(
self, offer, response_checker, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.extensions += offer
self._options.check_permessage_deflate = response_checker
client = client_for_testing.create_client(self._options)
try:
client.connect()
if test_function is not None:
test_function(client)
client.assert_connection_closed()
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_close_with_code_and_reason_test(self, test_function, code,
reason):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(self._options)
try:
test_function(client, code, reason)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_http_fallback_test(self, options, status):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
client.connect()
self.fail('Could not catch HttpStatusException')
except client_for_testing.HttpStatusException, e:
self.assertEqual(status, e.status)
except Exception, e:
self.fail('Catch unexpected exception')
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_mux_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = mux_client_for_testing.MuxClient(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_binary(self):
self._run_test(_echo_check_procedure_with_binary)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
def test_unmasked_frame(self):
self._run_test(_unmasked_frame_check_procedure)
def test_echo_deflate_frame(self):
self._run_deflate_frame_test(_echo_check_procedure)
def test_echo_deflate_frame_server_close(self):
self._run_deflate_frame_test(
_echo_check_procedure_with_goodbye)
def test_echo_permessage_deflate(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_frames(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd',
client_for_testing.OPCODE_TEXT,
end=False,
rsv1=1)
client._stream.send_data(
'\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_messages(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x00\x11\x00\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_msgs_server_no_context_takeover(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_preference(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate', 'deflate-frame'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_parameters(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_max_window_bits', '10'),
('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=10; '
'server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_bad_server_max_window_bits(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=3000000'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_bad_server_max_window_bits(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=3000000'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_undefined_parameter(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; foo=bar'],
response_checker,
test_function)
def test_echo_close_with_code_and_reason(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, 3333, 'sunsunsunsun')
def test_echo_close_with_empty_body(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, None, '')
def test_mux_echo(self):
self._run_mux_test(_mux_echo_check_procedure)
    def test_close_on_protocol_error(self):
        """Tests that the server sends a close frame with protocol error status
        code when the client sends data with some protocol error.
        """

        def test_function(client):
            client.connect()

            # Intermediate frame without any preceding start of fragmentation
            # frame.
            client.send_frame_of_arbitrary_bytes('\x80\x80', '')
            client.assert_receive_close(
                client_for_testing.STATUS_PROTOCOL_ERROR)
        self._run_test(test_function)

    def test_close_on_unsupported_frame(self):
        """Tests that the server sends a close frame with unsupported operation
        status code when the client sends data asking some operation that is
        not supported by the server.
        """

        def test_function(client):
            client.connect()

            # Text frame with RSV3 bit raised.
            client.send_frame_of_arbitrary_bytes('\x91\x80', '')
            client.assert_receive_close(
                client_for_testing.STATUS_UNSUPPORTED_DATA)
        self._run_test(test_function)

    def test_close_on_invalid_frame(self):
        """Tests that the server sends a close frame with invalid frame payload
        data status code when the client sends an invalid frame like containing
        invalid UTF-8 character.
        """

        def test_function(client):
            client.connect()

            # Text frame with invalid UTF-8 string ('\x80' is a bare
            # continuation byte, never valid as a first byte).
            client.send_message('\x80', raw=True)
            client.assert_receive_close(
                client_for_testing.STATUS_INVALID_FRAME_PAYLOAD_DATA)
        self._run_test(test_function)
    def test_close_on_internal_endpoint_error(self):
        """Tests that the server sends a close frame with internal endpoint
        error status code when the handler does bad operation.
        """

        self._options.resource = '/internal_error'

        def test_function(client):
            client.connect()
            client.assert_receive_close(
                client_for_testing.STATUS_INTERNAL_ENDPOINT_ERROR)
        self._run_test(test_function)

    # TODO(toyoshim): Add tests to verify invalid absolute uri handling like
    # host unmatch, port unmatch and invalid port description (':' without port
    # number).
    def test_absolute_uri(self):
        """Tests absolute uri request."""

        options = self._options
        options.resource = 'ws://localhost:%d/echo' % options.server_port
        self._run_test_with_client_options(_echo_check_procedure, options)

    def test_origin_check(self):
        """Tests http fallback on origin check fail."""

        options = self._options
        options.resource = '/origin_check'
        # Server shows warning message for http 403 fallback. This warning
        # message is confusing. Following pipe disposes warning messages.
        self.server_stderr = subprocess.PIPE
        self._run_http_fallback_test(options, 403)

    def test_version_check(self):
        """Tests http fallback on version check fail."""

        options = self._options
        # 99 is not a defined WebSocket protocol version, so the handshake
        # must be rejected with 400.
        options.version = 99
        self._run_http_fallback_test(options, 400)
class EndToEndHyBi00Test(EndToEndTestBase):
    """End-to-end tests speaking the legacy HyBi 00 (hixie-76) protocol."""

    def setUp(self):
        super(EndToEndHyBi00Test, self).setUp()

    def _run_test(self, test_function):
        """Boot a server, drive test_function with a HyBi00 client, clean up
        the client socket and the server process no matter what happens."""
        server_process = self._run_server()
        try:
            time.sleep(_SERVER_WARMUP_IN_SEC)
            hybi00_client = client_for_testing.create_client_hybi00(
                self._options)
            try:
                test_function(hybi00_client)
            finally:
                hybi00_client.close_socket()
        finally:
            self._kill_process(server_process.pid)

    def test_echo(self):
        """Simple echo round trip."""
        self._run_test(_echo_check_procedure)

    def test_echo_server_close(self):
        """Echo where the server side initiates the closing handshake."""
        self._run_test(_echo_check_procedure_with_goodbye)
class EndToEndTestWithEchoClient(EndToEndTestBase):
    """Runs the bundled example/echo_client.py as a subprocess against a
    freshly started server and checks its stdout/stderr."""

    def setUp(self):
        EndToEndTestBase.setUp(self)

    def _check_example_echo_client_result(
        self, expected, stdoutdata, stderrdata):
        """Fail unless the client printed exactly *expected* and wrote
        nothing to stderr."""
        actual = stdoutdata.decode("utf-8")
        if actual != expected:
            raise Exception('Unexpected result on example echo client: '
                            '%r (expected) vs %r (actual)' %
                            (expected, actual))
        if stderrdata is not None:
            raise Exception('Unexpected error message on example echo '
                            'client: %r' % stderrdata)

    def test_example_echo_client(self):
        """Tests that the echo_client.py example can talk with the server."""

        server = self._run_server()
        try:
            time.sleep(_SERVER_WARMUP_IN_SEC)

            client_command = os.path.join(
                self.top_dir, 'example', 'echo_client.py')

            # Expected output for the default messages.
            default_expectation = ('Send: Hello\n' 'Recv: Hello\n'
                u'Send: \u65e5\u672c\n' u'Recv: \u65e5\u672c\n'
                'Send close\n' 'Recv ack\n')

            args = [client_command,
                    '-p', str(self._options.server_port)]
            client = self._run_python_command(args, stdout=subprocess.PIPE)
            stdoutdata, stderrdata = client.communicate()
            self._check_example_echo_client_result(
                default_expectation, stdoutdata, stderrdata)

            # Process a big message for which extended payload length is used.
            # To handle extended payload length, ws_version attribute will be
            # accessed. This test checks that ws_version is correctly set.
            big_message = 'a' * 1024
            args = [client_command,
                    '-p', str(self._options.server_port),
                    '-m', big_message]
            client = self._run_python_command(args, stdout=subprocess.PIPE)
            stdoutdata, stderrdata = client.communicate()
            expected = ('Send: %s\nRecv: %s\nSend close\nRecv ack\n' %
                        (big_message, big_message))
            self._check_example_echo_client_result(
                expected, stdoutdata, stderrdata)

            # Test the permessage-deflate extension.
            args = [client_command,
                    '-p', str(self._options.server_port),
                    '--use_permessage_deflate']
            client = self._run_python_command(args, stdout=subprocess.PIPE)
            stdoutdata, stderrdata = client.communicate()
            self._check_example_echo_client_result(
                default_expectation, stdoutdata, stderrdata)
        finally:
            self._kill_process(server.pid)
# Entry point: run every test in this module when executed directly.
if __name__ == '__main__':
    unittest.main()

# vi:sts=4 sw=4 et
|
GennadiyZakharov/relativepotency | refs/heads/master | src/rpgui/rpcentralwidget.py | 1 | '''
Created on Oct 30, 2014
@author: gzakharov
'''
from PyQt4 import QtCore, QtGui
from rpcore.experiment import Experiment
from rpcore.actions import createAction
from rpgui.plotwidget import PlotWidget
class RpCentralWidget(QtGui.QWidget):
    '''
    Central widget of the relative-potency GUI: an experiment data table
    side by side with a plot area.
    '''

    def __init__(self, parent=None):
        '''
        Build the Experiment model, the data table, the plot widget and
        the layout, then schedule loading of the test data on the event
        loop (singleShot with a 0 ms delay).
        '''
        super(RpCentralWidget, self).__init__(parent)
        self.setObjectName("rpCentralWidget")
        self.experiment = Experiment(self)
        # Re-render the table whenever a sample is added to the experiment.
        self.experiment.signalSampleAdded.connect(self.sampleAdded)

        self.dataTable = QtGui.QTableWidget()
        self.dataTable.setColumnCount(1)
        # Read-only table; cells are filled programmatically.
        self.dataTable.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.dataTable.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
        self.dataTable.verticalHeader().setResizeMode(QtGui.QHeaderView.Stretch)

        self.plotWidget = PlotWidget(self)

        plotExperimentAction = createAction(self, 'Plot experiment', '',
                                            'document-open', '')
        plotExperimentAction.triggered.connect(self.plotExperiment)
        # NOTE(review): saveReferenceAction is created but its handler is
        # commented out and it is never added to actionList — presumably an
        # unfinished feature; confirm before removing.
        saveReferenceAction = createAction(self, 'Save reference...', '',
                                           'document-save', '')
        #saveReferenceAction.triggered.connect(self.saveReference)

        self.actionList = [plotExperimentAction,]
        # First column is always the concentration; sampleAdded appends one
        # header per density dataset (the list accumulates across calls).
        self.headers = ['Conc']
        self.dataTable.setHorizontalHeaderLabels(self.headers)

        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.dataTable)
        layout.addWidget(self.plotWidget)
        self.setLayout(layout)
        QtCore.QTimer.singleShot(0, self.experiment.initTestData)

    def sampleAdded(self, sample):
        '''Fill the table with *sample*'s concentrations (column 0) and
        append one new column per density dataset of the sample.'''
        self.dataTable.setRowCount(len(sample.concentrations))
        # ('concenration' is a pre-existing typo in a local name only.)
        for i, concenration in enumerate(sample.concentrations):
            self.dataTable.setItem(i, 0, QtGui.QTableWidgetItem('{:.2f}'.format(concenration)))
        for i, dataset in enumerate(sample.densities):
            self.headers.append(sample.name + ':' + str(i))
            column = self.dataTable.columnCount()
            self.dataTable.setColumnCount(column + 1)
            for j, density in enumerate(dataset):
                self.dataTable.setItem(j, column, QtGui.QTableWidgetItem('{:.2f}'.format(density)))
        self.dataTable.setHorizontalHeaderLabels(self.headers)

    def plotExperiment(self):
        '''Ask the experiment for a plot of all samples and show it.'''
        plot = self.experiment.plotSamples()
        plot.show()
Bismarrck/tensorflow | refs/heads/master | tensorflow/python/keras/applications/imagenet_utils.py | 12 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for ImageNet data preprocessing & prediction decoding.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_applications import imagenet_utils
from tensorflow.python.keras.applications import keras_modules_injection
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.applications.imagenet_utils.decode_predictions')
@keras_modules_injection
def decode_predictions(*args, **kwargs):
  """Decodes the prediction of an ImageNet model.

  Thin wrapper forwarding to
  `keras_applications.imagenet_utils.decode_predictions` with the
  TF-Keras backend modules injected by `keras_modules_injection`.

  Bug fix: this function was exported under the wrong public name
  ('...imagenet_utils.preprocess_input' — a copy-paste of the decorator
  below), which both hid `decode_predictions` from the public API and
  collided with the genuine `preprocess_input` export.
  """
  return imagenet_utils.decode_predictions(*args, **kwargs)
@keras_export('keras.applications.imagenet_utils.preprocess_input')
@keras_modules_injection
def preprocess_input(*args, **kwargs):
  """Preprocesses a batch of images for an ImageNet model.

  Thin wrapper forwarding to
  `keras_applications.imagenet_utils.preprocess_input` with the TF-Keras
  backend modules injected by `keras_modules_injection`.
  """
  return imagenet_utils.preprocess_input(*args, **kwargs)
|
kvar/ansible | refs/heads/seas_master_2.9.5 | test/units/modules/storage/netapp/test_na_ontap_volume.py | 21 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_volume \
import NetAppOntapVolume as vol_module # module under test
# Skip this whole module when the NetApp ZAPI bindings (netapp_lib) are not
# installed; none of these tests can run without them.
if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Stage *args* so that AnsibleModule picks them up at construction."""
    wrapped = {'ANSIBLE_MODULE_ARGS': args}
    # pylint: disable=protected-access
    basic._ANSIBLE_ARGS = to_bytes(json.dumps(wrapped))
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json so a test can catch the result."""


class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json so a test can catch the failure."""


def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for module.exit_json: package return data into an exception."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for module.fail_json: package return data into an exception."""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''

    def __init__(self, kind=None, data=None, job_error=None):
        ''' save arguments '''
        # kind selects which canned XML reply invoke_successfully returns;
        # data supplies the volume attributes used to build that reply;
        # job_error controls the state reported by build_job_info.
        self.kind = kind
        self.params = data
        self.xml_in = None    # last request element received
        self.xml_out = None   # last reply element returned
        self.job_error = job_error

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        self.xml_in = xml
        if self.kind == 'volume':
            xml = self.build_volume_info(self.params)
        elif self.kind == 'job_info':
            xml = self.build_job_info(self.job_error)
        elif self.kind == 'error_modify':
            xml = self.build_modify_error()
        elif self.kind == 'failure_modify_async':
            xml = self.build_failure_modify_async()
        elif self.kind == 'success_modify_async':
            xml = self.build_success_modify_async()
        elif self.kind == 'zapi_error':
            # Simulate the backend rejecting the call outright.
            error = netapp_utils.zapi.NaApiError('test', 'error')
            raise error
        # For any other kind the request element is echoed back unchanged.
        self.xml_out = xml
        return xml

    @staticmethod
    def build_volume_info(vol_details):
        ''' build xml data for volume-attributes of a flexvol volume '''
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = {
            'num-records': 1,
            'attributes-list': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'containing-aggregate-name': vol_details['aggregate'],
                        'junction-path': vol_details['junction_path'],
                        'style-extended': 'flexvol',
                        'comment': vol_details['comment']
                    },
                    'volume-language-attributes': {
                        'language-code': 'en'
                    },
                    'volume-export-attributes': {
                        'policy': 'default'
                    },
                    'volume-performance-attributes': {
                        'is-atime-update-enabled': 'true'
                    },
                    'volume-state-attributes': {
                        'state': "online",
                        'is-nvfail-enabled': 'true'
                    },
                    'volume-space-attributes': {
                        'space-guarantee': 'none',
                        'size': vol_details['size'],
                        'percentage-snapshot-reserve': vol_details['percent_snapshot_space'],
                        'space-slo': 'thick'
                    },
                    'volume-snapshot-attributes': {
                        'snapshot-policy': vol_details['snapshot_policy']
                    },
                    'volume-comp-aggr-attributes': {
                        'tiering-policy': 'snapshot-only'
                    },
                    'volume-security-attributes': {
                        'volume-security-unix-attributes': {
                            'permissions': vol_details['unix_permissions']
                        }
                    },
                    'volume-vserver-dr-protection-attributes': {
                        'vserver-dr-protection': vol_details['vserver_dr_protection'],
                    },
                    'volume-qos-attributes': {
                        'policy-group-name': vol_details['qos_policy_group'],
                        'adaptive-policy-group-name': vol_details['qos_adaptive_policy_group']
                    }
                }
            }
        }
        xml.translate_struct(attributes)
        return xml

    @staticmethod
    def build_flex_group_info(vol_details):
        ''' build xml data for flexGroup volume-attributes '''
        # NOTE(review): not dispatched from invoke_successfully in this file;
        # presumably kept for direct use by tests — confirm before removing.
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = {
            'num-records': 1,
            'attributes-list': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'aggr-list': vol_details['aggregate'],
                        'junction-path': vol_details['junction_path'],
                        'style-extended': 'flexgroup',
                        'comment': vol_details['comment']
                    },
                    'volume-language-attributes': {
                        'language-code': 'en'
                    },
                    'volume-export-attributes': {
                        'policy': 'default'
                    },
                    'volume-performance-attributes': {
                        'is-atime-update-enabled': 'true'
                    },
                    'volume-state-attributes': {
                        'state': "online"
                    },
                    'volume-space-attributes': {
                        'space-guarantee': 'none',
                        'size': vol_details['size']
                    },
                    'volume-snapshot-attributes': {
                        'snapshot-policy': vol_details['snapshot_policy']
                    },
                    'volume-security-attributes': {
                        'volume-security-unix-attributes': {
                            'permissions': vol_details['unix_permissions']
                        }
                    }
                }
            }
        }
        xml.translate_struct(attributes)
        return xml

    @staticmethod
    def build_job_info(error):
        ''' build xml data for a job; error selects the reported job-state '''
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = netapp_utils.zapi.NaElement('attributes')
        # Map the requested error flavour onto a job state.
        if error is None:
            state = 'success'
        elif error == 'time_out':
            state = 'running'
        elif error == 'failure':
            state = 'failure'
        else:
            state = 'other'
        attributes.add_node_with_children('job-info', **{
            'job-state': state,
            'job-progress': 'dummy',
            'job-completion': error,
        })
        xml.add_child_elem(attributes)
        xml.add_new_child('result-status', 'in_progress')
        xml.add_new_child('result-jobid', '1234')
        return xml

    @staticmethod
    def build_modify_error():
        ''' build xml data for a (sync) modify error '''
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = netapp_utils.zapi.NaElement('failure-list')
        info_list_obj = netapp_utils.zapi.NaElement('volume-modify-iter-info')
        info_list_obj.add_new_child('error-message', 'modify error message')
        attributes.add_child_elem(info_list_obj)
        xml.add_child_elem(attributes)
        return xml

    @staticmethod
    def build_success_modify_async():
        ''' build xml data for a successful async modify (job started) '''
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = netapp_utils.zapi.NaElement('success-list')
        info_list_obj = netapp_utils.zapi.NaElement('volume-modify-iter-async-info')
        info_list_obj.add_new_child('status', 'in_progress')
        info_list_obj.add_new_child('jobid', '1234')
        attributes.add_child_elem(info_list_obj)
        xml.add_child_elem(attributes)
        return xml

    @staticmethod
    def build_failure_modify_async():
        ''' build xml data for a failed async modify '''
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = netapp_utils.zapi.NaElement('failure-list')
        info_list_obj = netapp_utils.zapi.NaElement('volume-modify-iter-async-info')
        info_list_obj.add_new_child('status', 'failed')
        info_list_obj.add_new_child('jobid', '1234')
        info_list_obj.add_new_child('error-message', 'modify error message')
        attributes.add_child_elem(info_list_obj)
        xml.add_child_elem(attributes)
        return xml
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # Replace exit_json/fail_json on AnsibleModule with the raising
        # stand-ins so every test can capture the module result.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        # Canonical volume attributes used to build the mock ZAPI replies.
        self.mock_vol = {
            'name': 'test_vol',
            'aggregate': 'test_aggr',
            'junction_path': '/test',
            'vserver': 'test_vserver',
            'size': 20971520,
            'unix_permissions': '755',
            'snapshot_policy': 'default',
            'qos_policy_group': 'performance',
            'qos_adaptive_policy_group': 'performance',
            'percent_snapshot_space': 60,
            'language': 'en',
            'vserver_dr_protection': 'unprotected',
            'comment': 'test comment'
        }
    def mock_args(self, tag=None):
        """Return a complete module-argument dict for the given flavour.

        tag=None            -> plain flexvol (adds aggregate_name)
        tag='flexGroup_manual' -> flexGroup with an explicit aggr_list
        tag='flexGroup_auto'   -> flexGroup via auto_provision_as
        Any other tag falls through and returns None.
        """
        args = {
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!',
            'name': self.mock_vol['name'],
            'vserver': self.mock_vol['vserver'],
            'space_guarantee': 'none',
            'policy': 'default',
            'language': self.mock_vol['language'],
            'is_online': True,
            'unix_permissions': '---rwxr-xr-x',
            'snapshot_policy': 'default',
            'qos_policy_group': 'performance',
            'qos_adaptive_policy_group': 'performance',
            'size': 20,
            'size_unit': 'mb',
            'junction_path': '/test',
            'percent_snapshot_space': 60,
            'type': 'type',
            'nvfail_enabled': True,
            'space_slo': 'thick',
            'comment': self.mock_vol['comment']
        }
        if tag is None:
            args['aggregate_name'] = self.mock_vol['aggregate']
            return args
        elif tag == 'flexGroup_manual':
            args['aggr_list'] = 'aggr_0,aggr_1'
            args['aggr_list_multiplier'] = 2
            return args
        elif tag == 'flexGroup_auto':
            args['auto_provision_as'] = 'flexgroup'
            return args
def get_volume_mock_object(self, kind=None, job_error=None):
"""
Helper method to return an na_ontap_volume object
:param kind: passes this param to MockONTAPConnection().
:param job_error: error message when getting job status.
:return: na_ontap_volume object
"""
vol_obj = vol_module()
vol_obj.ems_log_event = Mock(return_value=None)
vol_obj.cluster = Mock()
vol_obj.cluster.invoke_successfully = Mock()
vol_obj.volume_style = None
if kind is None:
vol_obj.server = MockONTAPConnection()
elif kind == 'volume':
vol_obj.server = MockONTAPConnection(kind='volume', data=self.mock_vol)
elif kind == 'job_info':
vol_obj.server = MockONTAPConnection(kind='job_info', data=self.mock_vol, job_error=job_error)
elif kind == 'error_modify':
vol_obj.server = MockONTAPConnection(kind='error_modify', data=self.mock_vol)
elif kind == 'failure_modify_async':
vol_obj.server = MockONTAPConnection(kind='failure_modify_async', data=self.mock_vol)
elif kind == 'success_modify_async':
vol_obj.server = MockONTAPConnection(kind='success_modify_async', data=self.mock_vol)
elif kind == 'zapi_error':
vol_obj.server = MockONTAPConnection(kind='zapi_error', data=self.mock_vol)
return vol_obj
    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        # Constructing the module with no args must trigger fail_json.
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            vol_module()
        print('Info: %s' % exc.value.args[0]['msg'])

    def test_get_nonexistent_volume(self):
        ''' Test if get_volume returns None for non-existent volume '''
        set_module_args(self.mock_args())
        result = self.get_volume_mock_object().get_volume()
        assert result is None

    def test_get_existing_volume(self):
        ''' Test if get_volume returns details for existing volume '''
        set_module_args(self.mock_args())
        result = self.get_volume_mock_object('volume').get_volume()
        assert result['name'] == self.mock_vol['name']
        assert result['size'] == self.mock_vol['size']

    def test_create_error_missing_param(self):
        ''' Test if create throws an error if aggregate_name is not specified'''
        data = self.mock_args()
        del data['aggregate_name']
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_volume_mock_object('volume').create_volume()
        msg = 'Error provisioning volume test_vol: aggregate_name is required'
        assert exc.value.args[0]['msg'] == msg
    def test_successful_create(self):
        ''' Test successful create '''
        data = self.mock_args()
        data['size'] = 20
        data['encrypt'] = True
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object().apply()
        assert exc.value.args[0]['changed']

    def test_create_idempotency(self):
        ''' Test create idempotency: an existing identical volume -> no change '''
        set_module_args(self.mock_args())
        obj = self.get_volume_mock_object('volume')
        with pytest.raises(AnsibleExitJson) as exc:
            obj.apply()
        assert not exc.value.args[0]['changed']

    def test_successful_delete(self):
        ''' Test delete existing volume '''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_delete_idempotency(self):
        ''' Test delete idempotency: already-absent volume -> no change '''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object().apply()
        assert not exc.value.args[0]['changed']
    def test_successful_modify_size(self):
        ''' Test successful modify size '''
        data = self.mock_args()
        data['size'] = 200
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_modify_idempotency(self):
        ''' Test modify idempotency '''
        data = self.mock_args()
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert not exc.value.args[0]['changed']

    def test_modify_error(self):
        ''' Test that a modify failure surfaces the backend error message '''
        # (docstring fixed: it previously duplicated "Test modify idempotency")
        data = self.mock_args()
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_volume_mock_object('error_modify').volume_modify_attributes(dict())
        assert exc.value.args[0]['msg'] == 'Error modifying volume test_vol: modify error message'

    def test_mount_volume(self):
        ''' Test mount volume (junction path changes) '''
        data = self.mock_args()
        data['junction_path'] = "/test123"
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_unmount_volume(self):
        ''' Test unmount volume (empty junction path) '''
        data = self.mock_args()
        data['junction_path'] = ""
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']
    # Each of the following tests changes exactly one attribute away from the
    # mocked current state and asserts that apply() reports a change.

    def test_successful_modify_space(self):
        ''' Test successful modify space '''
        data = self.mock_args()
        data['space_guarantee'] = 'volume'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_successful_modify_unix_permissions(self):
        ''' Test successful modify unix_permissions '''
        data = self.mock_args()
        data['unix_permissions'] = '---rw-r-xr-x'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_successful_modify_snapshot_policy(self):
        ''' Test successful modify snapshot_policy '''
        data = self.mock_args()
        data['snapshot_policy'] = 'default-1weekly'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_successful_modify_percent_snapshot_space(self):
        ''' Test successful modify percent_snapshot_space '''
        data = self.mock_args()
        data['percent_snapshot_space'] = '90'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_successful_modify_qos_policy_group(self):
        ''' Test successful modify qos_policy_group '''
        data = self.mock_args()
        data['qos_policy_group'] = 'extreme'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_successful_modify_qos_adaptive_policy_group(self):
        ''' Test successful modify qos_adaptive_policy_group '''
        data = self.mock_args()
        data['qos_adaptive_policy_group'] = 'extreme'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']

    def test_successful_move(self):
        ''' Test successful modify aggregate (volume move) '''
        data = self.mock_args()
        data['aggregate_name'] = 'different_aggr'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object('volume').apply()
        assert exc.value.args[0]['changed']
    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_successful_rename(self, get_volume):
        ''' Test successful rename volume '''
        data = self.mock_args()
        data['from_name'] = self.mock_vol['name']
        data['name'] = 'new_name'
        set_module_args(data)
        current = {
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!',
            'name': self.mock_vol['name'],
            'vserver': self.mock_vol['vserver'],
        }
        # First lookup (new name) finds nothing; second (from_name) finds
        # the existing volume, so apply() takes the rename path.
        get_volume.side_effect = [
            None,
            current
        ]
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_volume_mock_object().apply()
        assert exc.value.args[0]['changed']

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_successful_rename_async(self, get_volume):
        ''' Test successful rename of an infinite (async) volume '''
        data = self.mock_args()
        data['from_name'] = self.mock_vol['name']
        data['name'] = 'new_name'
        set_module_args(data)
        current = {
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!',
            'name': self.mock_vol['name'],
            'vserver': self.mock_vol['vserver'],
            'is_infinite': True
        }
        get_volume.side_effect = [
            None,
            current
        ]
        # job_info connection lets the async job-status polling succeed.
        obj = self.get_volume_mock_object('job_info')
        with pytest.raises(AnsibleExitJson) as exc:
            obj.apply()
        assert exc.value.args[0]['changed']
    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.change_volume_state')
    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.volume_mount')
    def test_modify_helper(self, mount_volume, change_state):
        '''modify_volume must dispatch is_online -> change_volume_state and
        junction_path -> volume_mount.'''
        data = self.mock_args()
        set_module_args(data)
        modify = {
            'is_online': False,
            'junction_path': 'something'
        }
        obj = self.get_volume_mock_object('volume')
        obj.modify_volume(modify)
        change_state.assert_called_with()
        mount_volume.assert_called_with()
    # compare_chmod_value tests: the module accepts both numeric ('755') and
    # symbolic ('---rwxr-xr-x') permissions; True means "equivalent".

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_compare_chmod_value_true_1(self, get_volume):
        '''Symbolic all-dashes equals numeric 0.'''
        data = self.mock_args()
        data['unix_permissions'] = '------------'
        set_module_args(data)
        current = {
            'unix_permissions': '0'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object()
        assert obj.compare_chmod_value(current)

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_compare_chmod_value_true_2(self, get_volume):
        '''Symbolic rwxrwxrwx equals numeric 777.'''
        data = self.mock_args()
        data['unix_permissions'] = '---rwxrwxrwx'
        set_module_args(data)
        current = {
            'unix_permissions': '777'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object()
        assert obj.compare_chmod_value(current)

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_compare_chmod_value_true_3(self, get_volume):
        '''Symbolic rwxr-xr-x equals numeric 755.

        NOTE(review): the next test in the file reuses this exact method
        name, shadowing this definition unless it is renamed.
        '''
        data = self.mock_args()
        data['unix_permissions'] = '---rwxr-xr-x'
        set_module_args(data)
        current = {
            'unix_permissions': '755'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object()
        assert obj.compare_chmod_value(current)
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
def test_compare_chmod_value_true_3(self, get_volume):
data = self.mock_args()
data['unix_permissions'] = '755'
set_module_args(data)
current = {
'unix_permissions': '755'
}
get_volume.side_effect = [
current
]
obj = self.get_volume_mock_object()
assert obj.compare_chmod_value(current)
    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_compare_chmod_value_false_1(self, get_volume):
        '''rwxrwxrwx does not match numeric 0.'''
        data = self.mock_args()
        data['unix_permissions'] = '---rwxrwxrwx'
        set_module_args(data)
        current = {
            'unix_permissions': '0'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object()
        assert not obj.compare_chmod_value(current)

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_compare_chmod_value_false_2(self, get_volume):
        '''No current volume (None) -> cannot match.'''
        data = self.mock_args()
        data['unix_permissions'] = '---rwxrwxrwx'
        set_module_args(data)
        current = None
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object()
        assert not obj.compare_chmod_value(current)

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_compare_chmod_value_invalid_input_1(self, get_volume):
        '''Scrambled symbolic string (xwr order) is treated as no match.'''
        data = self.mock_args()
        data['unix_permissions'] = '---xwrxwrxwr'
        set_module_args(data)
        current = {
            'unix_permissions': '777'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object()
        assert not obj.compare_chmod_value(current)

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_compare_chmod_value_invalid_input_2(self, get_volume):
        '''Invalid character ('a') in the symbolic string -> no match.'''
        data = self.mock_args()
        data['unix_permissions'] = '---rwx-wx--a'
        set_module_args(data)
        current = {
            'unix_permissions': '0'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object()
        assert not obj.compare_chmod_value(current)

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_compare_chmod_value_invalid_input_3(self, get_volume):
        '''Too-short symbolic string -> no match.'''
        data = self.mock_args()
        data['unix_permissions'] = '---'
        set_module_args(data)
        current = {
            'unix_permissions': '0'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object()
        assert not obj.compare_chmod_value(current)
    def test_successful_create_flex_group_manually(self):
        ''' Test successful create flexGroup with an explicit aggr_list '''
        data = self.mock_args('flexGroup_manual')
        data['time_out'] = 20
        set_module_args(data)
        # job_info connection reports the async create job as successful.
        obj = self.get_volume_mock_object('job_info')
        with pytest.raises(AnsibleExitJson) as exc:
            obj.apply()
        assert exc.value.args[0]['changed']

    def test_successful_create_flex_group_auto_provision(self):
        ''' Test successful create flexGroup via auto_provision_as '''
        data = self.mock_args('flexGroup_auto')
        data['time_out'] = 20
        set_module_args(data)
        obj = self.get_volume_mock_object('job_info')
        with pytest.raises(AnsibleExitJson) as exc:
            obj.apply()
        assert exc.value.args[0]['changed']
    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_successful_delete_flex_group(self, get_volume):
        ''' Test successful delete flexGroup '''
        data = self.mock_args('flexGroup_manual')
        data['state'] = 'absent'
        set_module_args(data)
        current = {
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!',
            'name': self.mock_vol['name'],
            'vserver': self.mock_vol['vserver'],
            'style_extended': 'flexgroup',
            'unix_permissions': '755'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object('job_info')
        with pytest.raises(AnsibleExitJson) as exc:
            obj.apply()
        assert exc.value.args[0]['changed']

    @patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
    def test_successful_resize_flex_group(self, get_volume):
        ''' Test successful resize flexGroup '''
        # (docstring typo fixed: was "reszie")
        data = self.mock_args('flexGroup_manual')
        data['size'] = 400
        data['size_unit'] = 'mb'
        set_module_args(data)
        current = {
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!',
            'name': self.mock_vol['name'],
            'vserver': self.mock_vol['vserver'],
            'style_extended': 'flexgroup',
            'size': 20971520,
            'unix_permissions': '755'
        }
        get_volume.side_effect = [
            current
        ]
        obj = self.get_volume_mock_object('job_info')
        with pytest.raises(AnsibleExitJson) as exc:
            obj.apply()
        assert exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.check_job_status')
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
def test_successful_modify_unix_permissions_flex_group(self, get_volume, check_job_status):
''' Test successful modify unix permissions flexGroup '''
data = self.mock_args('flexGroup_manual')
data['time_out'] = 20
data['unix_permissions'] = '---rw-r-xr-x'
set_module_args(data)
current = {
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!',
'name': self.mock_vol['name'],
'vserver': self.mock_vol['vserver'],
'style_extended': 'flexgroup',
'unix_permissions': '777'
}
get_volume.side_effect = [
current
]
check_job_status.side_effect = [
None
]
obj = self.get_volume_mock_object('success_modify_async')
with pytest.raises(AnsibleExitJson) as exc:
obj.apply()
assert exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
def test_successful_modify_unix_permissions_flex_group_0_time_out(self, get_volume):
    ''' Test successful modify unix permissions flexGroup with time_out=0 (no job polling) '''
    data = self.mock_args('flexGroup_manual')
    # time_out of 0 means the module must not wait on the async job, so
    # check_job_status is intentionally NOT mocked here.
    data['time_out'] = 0
    data['unix_permissions'] = '---rw-r-xr-x'
    set_module_args(data)
    current = {
        'hostname': 'test',
        'username': 'test_user',
        'password': 'test_pass!',
        'name': self.mock_vol['name'],
        'vserver': self.mock_vol['vserver'],
        'style_extended': 'flexgroup',
        'unix_permissions': '777'
    }
    get_volume.side_effect = [
        current
    ]
    obj = self.get_volume_mock_object('success_modify_async')
    with pytest.raises(AnsibleExitJson) as exc:
        obj.apply()
    assert exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.check_job_status')
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
def test_error_modify_unix_permissions_flex_group(self, get_volume, check_job_status):
    ''' Test error modify unix permissions flexGroup '''
    data = self.mock_args('flexGroup_manual')
    data['time_out'] = 20
    data['unix_permissions'] = '---rw-r-xr-x'
    set_module_args(data)
    current = {
        'hostname': 'test',
        'username': 'test_user',
        'password': 'test_pass!',
        'name': self.mock_vol['name'],
        'vserver': self.mock_vol['vserver'],
        'style_extended': 'flexgroup',
        'unix_permissions': '777'
    }
    get_volume.side_effect = [
        current
    ]
    # A non-None job status is treated as a failure message by the module.
    check_job_status.side_effect = ['error']
    obj = self.get_volume_mock_object('success_modify_async')
    with pytest.raises(AnsibleFailJson) as exc:
        obj.apply()
    assert exc.value.args[0]['msg'] == 'Error when modify volume: error'
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
def test_failure_modify_unix_permissions_flex_group(self, get_volume):
    ''' Test failure modify unix permissions flexGroup (ZAPI modify call fails) '''
    data = self.mock_args('flexGroup_manual')
    data['unix_permissions'] = '---rw-r-xr-x'
    data['time_out'] = 20
    set_module_args(data)
    # NOTE(review): style_extended here is 'flexvol' (not 'flexgroup') —
    # presumably intentional so the sync modify path is exercised; confirm.
    current = {
        'hostname': 'test',
        'username': 'test_user',
        'password': 'test_pass!',
        'name': self.mock_vol['name'],
        'vserver': self.mock_vol['vserver'],
        'style_extended': 'flexvol',
        'unix_permissions': '777'
    }
    get_volume.side_effect = [
        current
    ]
    # 'failure_modify_async' mock makes the modify ZAPI call raise an error.
    obj = self.get_volume_mock_object('failure_modify_async')
    with pytest.raises(AnsibleFailJson) as exc:
        obj.apply()
    assert exc.value.args[0]['msg'] == 'Error modifying volume test_vol: modify error message'
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
def test_successful_offline_state_flex_group(self, get_volume):
    ''' Test successful offline flexGroup state '''
    data = self.mock_args('flexGroup_manual')
    data['is_online'] = False
    set_module_args(data)
    # Volume is currently online and mounted, so apply() must unmount and
    # take it offline.
    current = {
        'hostname': 'test',
        'username': 'test_user',
        'password': 'test_pass!',
        'name': self.mock_vol['name'],
        'vserver': self.mock_vol['vserver'],
        'style_extended': 'flexgroup',
        'is_online': True,
        'junction_path': 'anything',
        'unix_permissions': '755'
    }
    get_volume.side_effect = [
        current
    ]
    obj = self.get_volume_mock_object('job_info')
    with pytest.raises(AnsibleExitJson) as exc:
        obj.apply()
    assert exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_volume.NetAppOntapVolume.get_volume')
def test_successful_online_state_flex_group(self, get_volume):
    ''' Test successful online flexGroup state '''
    data = self.mock_args('flexGroup_manual')
    set_module_args(data)
    # Volume is currently offline; the module default is online, so apply()
    # must bring it online (and remount).
    current = {
        'hostname': 'test',
        'username': 'test_user',
        'password': 'test_pass!',
        'name': self.mock_vol['name'],
        'vserver': self.mock_vol['vserver'],
        'style_extended': 'flexgroup',
        'is_online': False,
        'junction_path': 'anything',
        'unix_permissions': '755'
    }
    get_volume.side_effect = [
        current
    ]
    obj = self.get_volume_mock_object('job_info')
    with pytest.raises(AnsibleExitJson) as exc:
        obj.apply()
    assert exc.value.args[0]['changed']
def test_check_job_status_error(self):
    ''' Test check job status error: a failed job returns its failure message '''
    data = self.mock_args('flexGroup_manual')
    data['time_out'] = 0
    set_module_args(data)
    obj = self.get_volume_mock_object('job_info', job_error='failure')
    result = obj.check_job_status('123')
    assert result == 'failure'
def test_check_job_status_time_out_is_0(self):
    ''' Test check job status when time_out is 0: gives up immediately with a timeout message '''
    data = self.mock_args('flexGroup_manual')
    data['time_out'] = 0
    set_module_args(data)
    obj = self.get_volume_mock_object('job_info', job_error='time_out')
    result = obj.check_job_status('123')
    assert result == 'job completion exceeded expected timer of: 0 seconds'
def test_check_job_status_unexpected(self):
    ''' Test check job status with an unexpected job state: module must fail '''
    data = self.mock_args('flexGroup_manual')
    data['time_out'] = 20
    set_module_args(data)
    obj = self.get_volume_mock_object('job_info', job_error='other')
    with pytest.raises(AnsibleFailJson) as exc:
        obj.check_job_status('123')
    assert exc.value.args[0]['failed']
def test_error_assign_efficiency_policy(self):
    ''' Test that a ZAPI error while enabling efficiency (sync path) is reported '''
    data = self.mock_args()
    data['efficiency_policy'] = 'test_policy'
    set_module_args(data)
    obj = self.get_volume_mock_object('zapi_error')
    with pytest.raises(AnsibleFailJson) as exc:
        obj.assign_efficiency_policy()
    assert exc.value.args[0]['msg'] == 'Error enable efficiency on volume test_vol: NetApp API failed. Reason - test:error'
def test_error_assign_efficiency_policy_async(self):
    ''' Test that a ZAPI error while enabling efficiency (async path) is reported '''
    data = self.mock_args()
    data['efficiency_policy'] = 'test_policy'
    set_module_args(data)
    obj = self.get_volume_mock_object('zapi_error')
    with pytest.raises(AnsibleFailJson) as exc:
        obj.assign_efficiency_policy_async()
    assert exc.value.args[0]['msg'] == 'Error enable efficiency on volume test_vol: NetApp API failed. Reason - test:error'
def test_successful_modify_tiering_policy(self):
    ''' Test successful modify tiering policy '''
    data = self.mock_args()
    data['tiering_policy'] = 'auto'
    set_module_args(data)
    with pytest.raises(AnsibleExitJson) as exc:
        self.get_volume_mock_object('volume').apply()
    assert exc.value.args[0]['changed']
def test_successful_modify_vserver_dr_protection(self):
    ''' Test successful modify vserver_dr_protection '''
    data = self.mock_args()
    data['vserver_dr_protection'] = 'protected'
    set_module_args(data)
    with pytest.raises(AnsibleExitJson) as exc:
        self.get_volume_mock_object('volume').apply()
    assert exc.value.args[0]['changed']
|
siddharths067/HuHubaProject | refs/heads/master | lib/requests_toolbelt/multipart/encoder.py | 17 | # -*- coding: utf-8 -*-
"""
requests_toolbelt.multipart.encoder
===================================
This holds all of the implementation details of the MultipartEncoder
"""
import contextlib
import io
import os
from uuid import uuid4
from .._compat import fields
class MultipartEncoder(object):
    """
    The ``MultipartEncoder`` object is a generic interface to the engine that
    will create a ``multipart/form-data`` body for you.

    The basic usage is:

    .. code-block:: python

        import requests
        from requests_toolbelt import MultipartEncoder

        encoder = MultipartEncoder({'field': 'value',
                                    'other_field': 'other_value'})
        r = requests.post('https://httpbin.org/post', data=encoder,
                          headers={'Content-Type': encoder.content_type})

    If you do not need to take advantage of streaming the post body, you can
    also do:

    .. code-block:: python

        r = requests.post('https://httpbin.org/post',
                          data=encoder.to_string(),
                          headers={'Content-Type': encoder.content_type})

    If you want the encoder to use a specific order, you can use an
    OrderedDict or more simply, a list of tuples:

    .. code-block:: python

        encoder = MultipartEncoder([('field', 'value'),
                                    ('other_field', 'other_value')])

    .. versionchanged:: 0.4.0

    You can also provide tuples as part values as you would provide them to
    requests' ``files`` parameter.

    .. code-block:: python

        encoder = MultipartEncoder({
            'field': ('file_name', b'{"a": "b"}', 'application/json',
                      {'X-My-Header': 'my-value'})
        })

    .. warning::

        This object will end up directly in :mod:`httplib`. Currently,
        :mod:`httplib` has a hard-coded read size of **8192 bytes**. This
        means that it will loop until the file has been read and your upload
        could take a while. This is **not** a bug in requests. A feature is
        being considered for this object to allow you, the user, to specify
        what size should be returned on a read. If you have opinions on this,
        please weigh in on `this issue`_.

    .. _this issue:
        https://github.com/sigmavirus24/requests-toolbelt/issues/75
    """

    def __init__(self, fields, boundary=None, encoding='utf-8'):
        #: Boundary value either passed in by the user or created
        self.boundary_value = boundary or uuid4().hex
        # Computed boundary
        self.boundary = '--{0}'.format(self.boundary_value)
        #: Encoding of the data being passed in
        self.encoding = encoding
        # Pre-encoded boundary
        self._encoded_boundary = b''.join([
            encode_with(self.boundary, self.encoding),
            encode_with('\r\n', self.encoding)
            ])
        #: Fields provided by the user
        self.fields = fields
        #: Whether or not the encoder is finished
        self.finished = False
        #: Pre-computed parts of the upload
        self.parts = []
        # Pre-computed parts iterator
        self._iter_parts = iter([])
        # The part we're currently working with
        self._current_part = None
        # Cached computation of the body's length
        self._len = None
        # Our buffer
        self._buffer = CustomBytesIO(encoding=encoding)
        # Pre-compute each part's headers
        self._prepare_parts()
        # Load boundary into buffer
        self._write_boundary()

    @property
    def len(self):
        """Length of the multipart/form-data body.

        requests will first attempt to get the length of the body by calling
        ``len(body)`` and then by checking for the ``len`` attribute.

        On 32-bit systems, the ``__len__`` method cannot return anything
        larger than an integer (in C) can hold. If the total size of the body
        is even slightly larger than 4GB users will see an OverflowError. This
        manifested itself in `bug #80`_.

        As such, we now calculate the length lazily as a property.

        .. _bug #80:
            https://github.com/sigmavirus24/requests-toolbelt/issues/80
        """
        # If _len isn't already calculated, calculate, return, and set it
        return self._len or self._calculate_length()

    def __repr__(self):
        return '<MultipartEncoder: {0!r}>'.format(self.fields)

    def _calculate_length(self):
        """
        This uses the parts to calculate the length of the body.

        This returns the calculated length so __len__ can be lazy.
        """
        boundary_len = len(self.boundary)  # Length of --{boundary}
        # boundary length + header length + body length + len('\r\n') * 2
        self._len = sum(
            (boundary_len + total_len(p) + 4) for p in self.parts
            ) + boundary_len + 4
        return self._len

    def _calculate_load_amount(self, read_size):
        """This calculates how many bytes need to be added to the buffer.

        When a consumer read's ``x`` from the buffer, there are two cases to
        satisfy:

            1. Enough data in the buffer to return the requested amount
            2. Not enough data

        This function uses the amount of unread bytes in the buffer and
        determines how much the Encoder has to load before it can return the
        requested amount of bytes.

        :param int read_size: the number of bytes the consumer requests
        :returns: int -- the number of bytes that must be loaded into the
            buffer before the read can be satisfied. This will be strictly
            non-negative
        """
        amount = read_size - total_len(self._buffer)
        return amount if amount > 0 else 0

    def _load(self, amount):
        """Load ``amount`` number of bytes into the buffer."""
        # Drop bytes that have already been consumed so the buffer does not
        # grow without bound while streaming.
        self._buffer.smart_truncate()
        part = self._current_part or self._next_part()
        # amount == -1 means "load everything that is left".
        while amount == -1 or amount > 0:
            written = 0
            if not part.bytes_left_to_write():
                # Part exhausted: terminate it and start the next one (or,
                # if there is none, write the closing boundary and finish).
                written += self._write(b'\r\n')
                written += self._write_boundary()
                part = self._next_part()
                if not part:
                    written += self._write_closing_boundary()
                    self.finished = True
                    break
            written += part.write_to(self._buffer, amount)
            if amount != -1:
                amount -= written

    def _next_part(self):
        # Advance the parts iterator; returns None when all parts are done.
        try:
            p = self._current_part = next(self._iter_parts)
        except StopIteration:
            p = None
        return p

    def _iter_fields(self):
        # Yield a urllib3 RequestField for each user-supplied field,
        # accepting the same (name, value[, type[, headers]]) tuple shapes
        # as requests' ``files`` parameter.
        _fields = self.fields
        if hasattr(self.fields, 'items'):
            _fields = list(self.fields.items())
        for k, v in _fields:
            file_name = None
            file_type = None
            file_headers = None
            if isinstance(v, (list, tuple)):
                if len(v) == 2:
                    file_name, file_pointer = v
                elif len(v) == 3:
                    file_name, file_pointer, file_type = v
                else:
                    file_name, file_pointer, file_type, file_headers = v
            else:
                file_pointer = v
            field = fields.RequestField(name=k, data=file_pointer,
                                        filename=file_name,
                                        headers=file_headers)
            field.make_multipart(content_type=file_type)
            yield field

    def _prepare_parts(self):
        """This uses the fields provided by the user and creates Part objects.

        It populates the `parts` attribute and uses that to create a
        generator for iteration.
        """
        enc = self.encoding
        self.parts = [Part.from_field(f, enc) for f in self._iter_fields()]
        self._iter_parts = iter(self.parts)

    def _write(self, bytes_to_write):
        """Write the bytes to the end of the buffer.

        :param bytes bytes_to_write: byte-string (or bytearray) to append to
            the buffer
        :returns: int -- the number of bytes written
        """
        return self._buffer.append(bytes_to_write)

    def _write_boundary(self):
        """Write the boundary to the end of the buffer."""
        return self._write(self._encoded_boundary)

    def _write_closing_boundary(self):
        """Write the bytes necessary to finish a multipart/form-data body."""
        # Rewind over the trailing '\r\n' of the last boundary and insert
        # the '--' terminator, so the body ends with '--{boundary}--\r\n'.
        with reset(self._buffer):
            self._buffer.seek(-2, 2)
            self._buffer.write(b'--\r\n')
        return 2

    def _write_headers(self, headers):
        """Write the current part's headers to the buffer."""
        return self._write(encode_with(headers, self.encoding))

    @property
    def content_type(self):
        return str(
            'multipart/form-data; boundary={0}'.format(self.boundary_value)
            )

    def to_string(self):
        """Return the entirety of the data in the encoder.

        .. note::

            This simply reads all of the data it can. If you have started
            streaming or reading data from the encoder, this method will only
            return whatever data is left in the encoder.

        .. note::

            This method affects the internal state of the encoder. Calling
            this method will exhaust the encoder.

        :returns: the multipart message
        :rtype: bytes
        """
        return self.read()

    def read(self, size=-1):
        """Read data from the streaming encoder.

        :param int size: (optional), If provided, ``read`` will return exactly
            that many bytes. If it is not provided, it will return the
            remaining bytes.

        :returns: bytes
        """
        if self.finished:
            return self._buffer.read(size)
        bytes_to_load = size
        if bytes_to_load != -1 and bytes_to_load is not None:
            bytes_to_load = self._calculate_load_amount(int(size))
        self._load(bytes_to_load)
        return self._buffer.read(size)
def IDENTITY(monitor):
    # Default no-op callback for MultipartEncoderMonitor: returns the
    # monitor unchanged.
    return monitor
class MultipartEncoderMonitor(object):
    """
    An object used to monitor the progress of a :class:`MultipartEncoder`.

    The :class:`MultipartEncoder` should only be responsible for preparing and
    streaming the data. For anyone who wishes to monitor it, they shouldn't be
    using that instance to manage that as well. Using this class, they can
    monitor an encoder and register a callback. The callback receives the
    instance of the monitor.

    To use this monitor, you construct your :class:`MultipartEncoder` as you
    normally would.

    .. code-block:: python

        from requests_toolbelt import (MultipartEncoder,
                                       MultipartEncoderMonitor)
        import requests

        def callback(encoder, bytes_read):
            # Do something with this information
            pass

        m = MultipartEncoder(fields={'field0': 'value0'})
        monitor = MultipartEncoderMonitor(m, callback)
        headers = {'Content-Type': monitor.content_type}
        r = requests.post('https://httpbin.org/post', data=monitor,
                          headers=headers)

    Alternatively, if your use case is very simple, you can use the following
    pattern.

    .. code-block:: python

        from requests_toolbelt import MultipartEncoderMonitor
        import requests

        def callback(encoder, bytes_read):
            # Do something with this information
            pass

        monitor = MultipartEncoderMonitor.from_fields(
            fields={'field0': 'value0'}, callback=callback
            )
        headers = {'Content-Type': monitor.content_type}
        r = requests.post('https://httpbin.org/post', data=monitor,
                          headers=headers)
    """

    def __init__(self, encoder, callback=None):
        #: Instance of the :class:`MultipartEncoder` being monitored
        self.encoder = encoder
        #: Optionally function to call after a read
        self.callback = callback or IDENTITY
        #: Number of bytes already read from the :class:`MultipartEncoder`
        #: instance
        self.bytes_read = 0
        #: Avoid the same problem in bug #80
        self.len = self.encoder.len

    @classmethod
    def from_fields(cls, fields, boundary=None, encoding='utf-8',
                    callback=None):
        # Convenience constructor: build the encoder and the monitor in one
        # call, mirroring MultipartEncoder's signature.
        encoder = MultipartEncoder(fields, boundary, encoding)
        return cls(encoder, callback)

    @property
    def content_type(self):
        return self.encoder.content_type

    def to_string(self):
        return self.read()

    def read(self, size=-1):
        # Delegate to the wrapped encoder, tallying progress and invoking
        # the user callback after every read.
        string = self.encoder.read(size)
        self.bytes_read += len(string)
        self.callback(self)
        return string
def encode_with(string, encoding):
    """Encode ``string`` with ``encoding`` if necessary.

    :param str string: If string is a bytes object, it will not encode it.
        Otherwise, this function will encode it with the provided encoding.
    :param str encoding: The encoding with which to encode string.
    :returns: encoded bytes object (or ``None`` when given ``None``)
    """
    if string is None:
        return None
    if isinstance(string, bytes):
        return string
    return string.encode(encoding)
def readable_data(data, encoding):
    """Coerce ``data`` to an object exposing a ``read`` method.

    Objects that are already file-like are returned untouched; everything
    else is wrapped in a :class:`CustomBytesIO`.
    """
    return data if hasattr(data, 'read') else CustomBytesIO(data, encoding)
def total_len(o):
    """Return the best-effort length of ``o`` in bytes/items.

    Tries, in order: ``__len__``, a ``len`` attribute, the size of the
    underlying file descriptor, and finally ``getvalue()``.  Returns
    ``None`` implicitly when none of these apply.
    """
    if hasattr(o, '__len__'):
        return len(o)
    if hasattr(o, 'len'):
        return o.len
    if hasattr(o, 'fileno'):
        try:
            return os.fstat(o.fileno()).st_size
        except io.UnsupportedOperation:
            # In-memory streams expose fileno() but cannot provide one.
            pass
    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())
@contextlib.contextmanager
def reset(buffer):
    """Keep track of the buffer's current position and write to the end.

    This is a context manager meant to be used when adding data to the buffer.
    It eliminates the need for every function to be concerned with the
    position of the cursor in the buffer.
    """
    # Remember the read cursor, jump to the end so the caller can append,
    # then restore the read cursor on normal exit.  (Deliberately not
    # try/finally: an exception in the body propagates without restoring.)
    original_position = buffer.tell()
    buffer.seek(0, 2)
    yield
    buffer.seek(original_position, 0)
def coerce_data(data, encoding):
    """Ensure that every object's __len__ behaves uniformly.

    Wraps raw values, value-holding streams, and real files so that
    :func:`total_len` works consistently on whatever comes back.
    """
    if isinstance(data, CustomBytesIO):
        return data
    if hasattr(data, 'getvalue'):
        return CustomBytesIO(data.getvalue(), encoding)
    if hasattr(data, 'fileno'):
        return FileWrapper(data)
    if not hasattr(data, 'read'):
        return CustomBytesIO(data, encoding)
    return data
def to_list(fields):
    """Return ``fields`` as a list of (name, value) pairs."""
    items = getattr(fields, 'items', None)
    if items is not None:
        # Mapping: expand to its key/value pairs.
        return list(items())
    return list(fields)
class Part(object):
    """A single multipart part: pre-rendered headers plus a body stream."""

    def __init__(self, headers, body):
        # Encoded header block (bytes) for this part.
        self.headers = headers
        # Body coerced so total_len() works on it (CustomBytesIO/FileWrapper).
        self.body = body
        # Headers are emitted once, before any body bytes.
        self.headers_unread = True
        self.len = len(self.headers) + total_len(self.body)

    @classmethod
    def from_field(cls, field, encoding):
        """Create a part from a Request Field generated by urllib3."""
        headers = encode_with(field.render_headers(), encoding)
        body = coerce_data(field.data, encoding)
        return cls(headers, body)

    def bytes_left_to_write(self):
        """Determine if there are bytes left to write.

        :returns: bool -- ``True`` if there are bytes left to write, otherwise
            ``False``
        """
        to_read = 0
        if self.headers_unread:
            to_read += len(self.headers)
        return (to_read + total_len(self.body)) > 0

    def write_to(self, buffer, size):
        """Write the requested amount of bytes to the buffer provided.

        The number of bytes written may exceed size on the first read since we
        load the headers ambitiously.

        :param CustomBytesIO buffer: buffer we want to write bytes to
        :param int size: number of bytes requested to be written to the buffer
        :returns: int -- number of bytes actually written
        """
        written = 0
        if self.headers_unread:
            # Emit the full header block in one go, regardless of size.
            written += buffer.append(self.headers)
            self.headers_unread = False
        # size == -1 means "write the whole remaining body".
        while total_len(self.body) > 0 and (size == -1 or written < size):
            amount_to_read = size
            if size != -1:
                amount_to_read = size - written
            written += buffer.append(self.body.read(amount_to_read))
        return written
class CustomBytesIO(io.BytesIO):
    """BytesIO whose ``len`` reports *unread* bytes and that can append
    without disturbing the read cursor."""

    def __init__(self, buffer=None, encoding='utf-8'):
        # Accept str input by encoding it first; bytes/None pass through.
        buffer = encode_with(buffer, encoding)
        super(CustomBytesIO, self).__init__(buffer)

    def _get_end(self):
        # Total size of the underlying buffer; restores the cursor after
        # seeking to the end to measure it.
        current_pos = self.tell()
        self.seek(0, 2)
        length = self.tell()
        self.seek(current_pos, 0)
        return length

    @property
    def len(self):
        # Number of bytes remaining from the current cursor to the end.
        length = self._get_end()
        return length - self.tell()

    def append(self, bytes):
        # NOTE: parameter name shadows the builtin ``bytes``; kept for
        # backward compatibility with existing keyword callers.
        with reset(self):
            written = self.write(bytes)
        return written

    def smart_truncate(self):
        # Once at least half of the buffer has been consumed, drop the
        # already-read prefix and rewind, keeping memory usage bounded
        # while streaming.
        to_be_read = total_len(self)
        already_read = self._get_end() - to_be_read
        if already_read >= to_be_read:
            old_bytes = self.read()
            self.seek(0, 0)
            self.truncate()
            self.write(old_bytes)
            self.seek(0, 0)  # We want to be at the beginning
class FileWrapper(object):
    """Wrap a real file object so its ``len`` reports remaining bytes."""

    def __init__(self, file_object):
        self.fd = file_object

    @property
    def len(self):
        # Remaining bytes = file size minus the current read position.
        return total_len(self.fd) - self.fd.tell()

    def read(self, length=-1):
        return self.fd.read(length)
|
geopython/QGIS | refs/heads/master | tests/code_layout/test_qgsdoccoverage.py | 12 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for API documentation coverage.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '01/02/2015'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
try:
from qgis.static_testing import unittest
except ImportError:
import unittest
from termcolor import colored
from doxygen_parser import DoxygenParser
from acceptable_missing_doc import ACCEPTABLE_MISSING_DOCS, ACCEPTABLE_MISSING_ADDED_NOTE, ACCEPTABLE_MISSING_BRIEF
# TO regenerate the list:
# uncomment the lines under the `# GEN LIST`
# $ export PYTHONPATH=build/output/python
# $ export QGIS_PREFIX_PATH=build/output
# $ python tests/src/python/test_qgsdoccoverage.py
# copy the output to the file:
# tests/src/python/acceptable_missing_doc.py
# in `ACCEPTABLE_MISSING_DOCS = { <past> }`.
class TestQgsDocCoverage(unittest.TestCase):
    """Checks that the QGIS C++ API documentation meets the project's
    coverage and formatting requirements, using the Doxygen XML output."""

    def testCoverage(self):
        print('CTEST_FULL_OUTPUT')
        # Doxygen XML is generated under <prefix>/../doc/api/xml by the build.
        prefixPath = os.environ['QGIS_PREFIX_PATH']
        docPath = os.path.join(prefixPath, '..', 'doc', 'api', 'xml')
        parser = DoxygenParser(docPath, ACCEPTABLE_MISSING_DOCS, ACCEPTABLE_MISSING_ADDED_NOTE, ACCEPTABLE_MISSING_BRIEF)
        # Summary statistics (informational only; the assertions below are
        # what actually gate the test).
        coverage = 100.0 * parser.documented_members / parser.documentable_members
        missing = parser.documentable_members - parser.documented_members
        print("---------------------------------")
        print(("{} total documentable members".format(parser.documentable_members)))
        print(("{} total contain valid documentation".format(parser.documented_members)))
        print(("Total documentation coverage {}%".format(coverage)))
        print("---------------------------------")
        print(("{} members missing documentation".format(missing)))
        print("---------------------------------")
        print("Unacceptable missing documentation:")
        # Per-class detail of members with missing docs.
        if parser.undocumented_members:
            for cls, props in list(parser.undocumented_members.items()):
                print(('\n\nClass {}, {}/{} members documented\n'.format(colored(cls, 'yellow'), props['documented'], props['members'])))
                for mem in props['missing_members']:
                    print((colored('  "' + mem + '"', 'yellow', attrs=['bold'])))
        # Members whose documentation exists but violates conventions.
        if parser.noncompliant_members:
            for cls, props in list(parser.noncompliant_members.items()):
                print(('\n\nClass {}, non-compliant members found\n'.format(colored(cls, 'yellow'))))
                for p in props:
                    for mem, error in p.items():
                        print((colored('  ' + mem + ': ' + error, 'yellow', attrs=['bold'])))
        # Dangling "see also" cross references.
        if parser.broken_links:
            for cls, props in list(parser.broken_links.items()):
                print(('\n\nClass {}, broken see also links found\n'.format(colored(cls, 'yellow'))))
                for member, links in props.items():
                    for l in links:
                        print((colored('  ' + member + ': ' + l, 'yellow', attrs=['bold'])))
        # self.assertEquals(len(parser.undocumented_string), 0, 'FAIL: new undocumented members have been introduced, please add documentation for these members')
        if parser.classes_missing_group:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without Doxygen group tag ("\ingroup"):'.format(len(parser.classes_missing_group)), 'yellow')))
            print('')
            print(('  ' + '\n  '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_group])))
        if parser.classes_missing_version_added:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without a version added doxygen note ("\since QGIS x.xx"):'.format(len(parser.classes_missing_version_added)), 'yellow')))
            print('')
            print(('  ' + '\n  '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_version_added])))
        if parser.classes_missing_brief:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without at least a brief description:'.format(len(parser.classes_missing_brief)), 'yellow')))
            print('')
            print(('  ' + '\n  '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_brief])))
        # Flush before asserting so the report above is visible in CTest logs.
        sys.stdout.flush()
        self.assertTrue(not parser.undocumented_members, 'Undocumented members found')
        self.assertTrue(not parser.classes_missing_group, 'Classes without \\group tag found')
        self.assertTrue(not parser.classes_missing_version_added, 'Classes without \\since version tag found')
        self.assertTrue(not parser.classes_missing_brief, 'Classes without \\brief description found')
        self.assertTrue(not parser.noncompliant_members, 'Non compliant members found')
        self.assertTrue(not parser.broken_links, 'Broken links found')
# Standard test-module entry point: run the tests when executed directly.
if __name__ == '__main__':
    unittest.main()
|
Iftekhar-ifti/androguard | refs/heads/master | androguard/core/bytecodes/dvm.py | 35 | # This file is part of Androguard.
#
# Copyright (C) 2012/2013, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core import bytecode
from androguard.core.androconf import CONF, debug, warning, is_android_raw
import sys
import re
import struct
from struct import pack, unpack, calcsize
DEX_FILE_MAGIC_35 = 'dex\n035\x00'
DEX_FILE_MAGIC_36 = 'dex\n036\x00'
ODEX_FILE_MAGIC_35 = 'dey\n035\x00'
ODEX_FILE_MAGIC_36 = 'dey\n036\x00'
TYPE_MAP_ITEM = {
0x0: "TYPE_HEADER_ITEM",
0x1: "TYPE_STRING_ID_ITEM",
0x2: "TYPE_TYPE_ID_ITEM",
0x3: "TYPE_PROTO_ID_ITEM",
0x4: "TYPE_FIELD_ID_ITEM",
0x5: "TYPE_METHOD_ID_ITEM",
0x6: "TYPE_CLASS_DEF_ITEM",
0x1000: "TYPE_MAP_LIST",
0x1001: "TYPE_TYPE_LIST",
0x1002: "TYPE_ANNOTATION_SET_REF_LIST",
0x1003: "TYPE_ANNOTATION_SET_ITEM",
0x2000: "TYPE_CLASS_DATA_ITEM",
0x2001: "TYPE_CODE_ITEM",
0x2002: "TYPE_STRING_DATA_ITEM",
0x2003: "TYPE_DEBUG_INFO_ITEM",
0x2004: "TYPE_ANNOTATION_ITEM",
0x2005: "TYPE_ENCODED_ARRAY_ITEM",
0x2006: "TYPE_ANNOTATIONS_DIRECTORY_ITEM",
}
ACCESS_FLAGS = [
(0x1, 'public'),
(0x2, 'private'),
(0x4, 'protected'),
(0x8, 'static'),
(0x10, 'final'),
(0x20, 'synchronized'),
(0x40, 'bridge'),
(0x80, 'varargs'),
(0x100, 'native'),
(0x200, 'interface'),
(0x400, 'abstract'),
(0x800, 'strict'),
(0x1000, 'synthetic'),
(0x4000, 'enum'),
(0x8000, 'unused'),
(0x10000, 'constructor'),
(0x20000, 'synchronized'),
]
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
'STR': 'String',
'StringBuilder': 'String'
}
def get_access_flags_string(value) :
    """
    Transform an access flags bitmask to the corresponding space-separated
    string of modifier names (e.g. 0x9 -> "public static").

    :param value: the value of the access flags
    :type value: int

    :rtype: string
    """
    names = [name for flag, name in ACCESS_FLAGS if (flag & value) == flag]
    return " ".join(names)
def get_type(atype, size=None):
    """
    Retrieve the source-level type name of a descriptor (e.g. : I -> int).

    :param atype: a Dalvik type descriptor (primitive letter, ``Lfqcn;``
        reference, or ``[elem`` array), possibly prefixed with ``java.lang``
    :param size: optional array size rendered inside the brackets
    :rtype: string
    """
    if atype.startswith('java.lang'):
        atype = atype.replace('java.lang.', '')
    # BUGFIX: this previously looked up TYPE_DESCRIPTOR with
    # atype.lstrip('java.lang').  str.lstrip() takes a *set of characters*,
    # not a prefix, so it could mangle names beginning with j/a/v/l/n/g/'.'
    # -- and the prefix has already been removed by the replace() above.
    res = TYPE_DESCRIPTOR.get(atype)
    if res is None:
        if atype[0] == 'L':
            # Reference type: Lcom/foo/Bar; -> com.foo.Bar
            res = atype[1:-1].replace('/', '.')
        elif atype[0] == '[':
            # Array type: recurse on the element descriptor.
            if size is None:
                res = '%s[]' % get_type(atype[1:])
            else:
                res = '%s[%s]' % (get_type(atype[1:]), size)
        else:
            # Unknown descriptor: return it unchanged.
            res = atype
    return res
MATH_DVM_OPCODES = { "add." : '+',
"div." : '/',
"mul." : '*',
"or." : '|',
"sub." : '-',
"and." : '&',
"xor." : '^',
"shl." : "<<",
"shr." : ">>",
}
FIELD_READ_DVM_OPCODES = [ ".get" ]
FIELD_WRITE_DVM_OPCODES = [ ".put" ]
BREAK_DVM_OPCODES = [ "invoke.", "move.", ".put", "if." ]
BRANCH_DVM_OPCODES = [ "throw", "throw.", "if.", "goto", "goto.", "return", "return.", "packed-switch$", "sparse-switch$" ]
def clean_name_instruction( instruction ) :
    """Return the instruction's mnemonic, folding every goto variant
    (goto, goto/16, goto/32 -- opcodes 0x28..0x2a) to plain "goto"."""
    opcode = instruction.get_op_value()
    if 0x28 <= opcode <= 0x2a :
        return "goto"
    return instruction.get_name()
def static_operand_instruction( instruction ) :
    """Concatenate the static operands of *instruction* into one string:
    its literal values, plus the referenced string for const-string
    opcodes (0x1a const-string, 0x1b const-string/jumbo)."""
    buff = ""
    if isinstance(instruction, Instruction) :
        # Only real instructions expose literals (not fill-array payloads).
        buff = "".join("%s" % literal for literal in instruction.get_literals())
    opcode = instruction.get_op_value()
    if opcode in (0x1a, 0x1b) :
        buff += instruction.get_string()
    return buff
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def readuleb128(buff) :
    """Decode an unsigned LEB128 number (at most 5 bytes) from *buff*.

    Bit 7 of each byte is the continuation flag; the low 7 bits are the
    payload, stored little-endian in 7-bit groups.  The 5th byte is taken
    whole (no mask), matching the original behaviour, and a warning is
    emitted if its high bits suggest a malformed encoding.
    """
    result = 0
    for shift in (0, 7, 14, 21, 28) :
        cur = ord( buff.read(1) )
        if shift == 28 :
            if cur > 0x0f :
                warning("possible error while decoding number")
            result |= cur << 28
            break
        result |= (cur & 0x7f) << shift
        if cur <= 0x7f :
            # continuation bit clear: this was the last byte
            break
    return result
def readusleb128(buff) :
    """Decode an unsigned LEB128 number (at most 5 bytes) from *buff*.

    Identical to readuleb128 except that no warning is emitted for a
    suspicious 5th byte.
    """
    result = 0
    for shift in (0, 7, 14, 21, 28) :
        cur = ord( buff.read(1) )
        if shift == 28 :
            # last possible byte: taken whole, no continuation check
            result |= cur << 28
            break
        result |= (cur & 0x7f) << shift
        if cur <= 0x7f :
            break
    return result
def readuleb128p1(buff) :
    # uleb128p1: unsigned LEB128 of (value + 1), so that -1 (NO_INDEX in
    # the DEX format) is representable.  Decode and subtract the bias.
    return readuleb128( buff ) - 1
def readsleb128(buff) :
    """Decode a signed LEB128 number from *buff*.

    Bit 7 of each byte is the continuation flag, the low 7 bits are
    payload (little-endian 7-bit groups).  After the final byte, bit 6
    is the sign bit and the value is sign-extended.

    BUGFIX: the previous implementation compared the byte obtained via
    ``unpack('=b', ...)`` -- a *signed* char in -128..127 -- against
    0x7f.  That comparison is always true, so continuation bytes were
    never consumed and every multi-byte value decoded incorrectly (and
    left the buffer mispositioned).

    :param buff: file-like object positioned at the encoded number
    :rtype: int
    """
    result = 0
    shift = 0
    while True :
        cur = ord( buff.read(1) )
        result |= (cur & 0x7f) << shift
        shift += 7
        if (cur & 0x80) == 0 :
            break
    # sign-extend when bit 6 of the last byte is set
    if cur & 0x40 :
        result -= (1 << shift)
    return result
def get_sbyte(buff) :
    """Read one byte from *buff* and return it as a signed integer (-128..127)."""
    (value,) = unpack( '=b', buff.read(1) )
    return value
def readsleb128_2(buff) :
    """Decode a signed LEB128 number from *buff* (alternate entry point).

    BUGFIX: the previous implementation compared a *signed* byte from
    get_sbyte() against 0x7f, which is always true for a value in
    -128..127, so the continuation bytes of multi-byte encodings were
    never read and the result was wrong.  This version implements the
    standard sLEB128 algorithm: accumulate 7-bit groups while bit 7 is
    set, then sign-extend from bit 6 of the last byte.

    :param buff: file-like object positioned at the encoded number
    :rtype: int
    """
    result = 0
    shift = 0
    while True :
        cur = ord( buff.read(1) )
        result |= (cur & 0x7f) << shift
        shift += 7
        if (cur & 0x80) == 0 :
            break
    if cur & 0x40 :
        # last group carried the sign bit: extend it
        result -= (1 << shift)
    return result
def writeuleb128(value) :
    """Encode a non-negative integer as unsigned LEB128 bytes.

    BUGFIX: the accumulator is now ``b""`` instead of ``""``.  pack()
    returns bytes, which cannot be concatenated onto a text string under
    Python 3; ``b""`` is identical to ``""`` under Python 2, so the
    behaviour there is unchanged.

    :param value: non-negative integer to encode
    :rtype: bytes
    """
    remaining = value >> 7
    buff = b""
    while remaining > 0 :
        # more 7-bit groups follow: set the continuation bit
        buff += pack( "=B", ((value & 0x7f) | 0x80) )
        value = remaining
        remaining >>= 7
    buff += pack( "=B", value & 0x7f )
    return buff
def writesleb128(value) :
    """Encode a signed integer as signed LEB128 bytes.

    BUGFIX: replaces the Python-2-only ``sys.maxint`` sign probe with a
    direct comparison (Python ints are arbitrary precision, so
    ``value >= 0`` is the exact equivalent), and accumulates into
    ``b""`` so pack() output concatenates correctly on Python 3 as well.

    :param value: integer to encode (may be negative)
    :rtype: bytes
    """
    # `remaining` converges to `end` under arithmetic right shifts:
    # 0 for non-negative input, -1 for negative input.
    end = 0 if value >= 0 else -1
    remaining = value >> 7
    hasMore = True
    buff = b""
    while hasMore :
        # stop once remaining is pure sign bits AND the sign bit of the
        # group just emitted (bit 6) matches the overall sign
        hasMore = (remaining != end) or ((remaining & 1) != ((value >> 6) & 1))
        tmp = 0
        if hasMore :
            tmp = 0x80
        buff += pack( "=B", (value & 0x7f) | (tmp) )
        value = remaining
        remaining >>= 7
    return buff
def determineNext(i, end, m) :
    """
    Compute the byte offsets of the possible successors of instruction `i`
    located at byte offset `end` inside method `m`.

    :returns: [-1] for throw/return, a single target for goto, fall-through
              plus branch target for if-*, fall-through plus every payload
              target for packed/sparse-switch, [] for any other opcode.
    """
    op_value = i.get_op_value()

    # throw (0x27) and return/return-* (0x0e..0x11): execution does not
    # fall through
    if (op_value == 0x27) or (0x0e <= op_value <= 0x11) :
        return [ -1 ]

    # goto / goto/16 / goto/32 (0x28..0x2a): one unconditional target
    # (ref_off is in 16-bit units, hence * 2 for bytes)
    if 0x28 <= op_value <= 0x2a :
        return [ end + i.get_ref_off() * 2 ]

    # if-* (0x32..0x3d): fall-through first, then the branch target
    if 0x32 <= op_value <= 0x3d :
        branch = end + i.get_ref_off() * 2
        return [ end + i.get_length(), branch ]

    # packed-switch (0x2b) / sparse-switch (0x2c): fall-through plus every
    # target listed in the associated payload data
    if op_value in (0x2b, 0x2c) :
        successors = [ end + i.get_length() ]
        payload = m.get_code().get_bc().get_ins_off( end + i.get_ref_off() * 2 )
        if payload != None :
            for target in payload.get_targets() :
                successors.append( target * 2 + end )
        return successors

    return []
def determineException(vm, m) :
    """
    Collect the exception handler ranges of method `m`.

    Returns a list of entries of the form
    [ start_byte, end_byte, [type_or_"any", handler_byte_offset], ... ].
    Code-item addresses (16-bit units) are converted to byte offsets by
    multiplying by 2.
    """
    # no exceptions !
    if m.get_code().get_tries_size() <= 0 :
        return []

    # group try_items by the absolute offset of their handler list
    h_off = {}

    handler_catch_list = m.get_code().get_handlers()

    for try_item in m.get_code().get_tries() :
        offset_handler = try_item.get_handler_off() + handler_catch_list.get_off()
        if offset_handler in h_off :
            h_off[ offset_handler ].append( [ try_item ] )
        else :
            h_off[ offset_handler ] = []
            h_off[ offset_handler ].append( [ try_item ] )

    #print m.get_name(), "\t HANDLER_CATCH_LIST SIZE", handler_catch_list.size, handler_catch_list.get_offset()

    # attach the matching encoded_catch_handler behind each grouped try_item
    for handler_catch in handler_catch_list.get_list() :
        if handler_catch.get_off() not in h_off :
            continue

        for i in h_off[ handler_catch.get_off() ] :
            i.append( handler_catch )

    exceptions = []
    #print m.get_name(), h_off
    for i in h_off :
        for value in h_off[ i ] :
            try_value = value[0]

            # protected region [start, end] in byte offsets
            z = [ try_value.get_start_addr() * 2, (try_value.get_start_addr() * 2) + (try_value.get_insn_count() * 2) - 1 ]

            # NOTE(review): assumes every grouped try_item received a handler
            # in the loop above; an unmatched offset would raise IndexError
            handler_catch = value[1]
            # size <= 0 signals the presence of a catch-all handler
            if handler_catch.get_size() <= 0 :
                z.append( [ "any", handler_catch.get_catch_all_addr() * 2 ] )

            for handler in handler_catch.get_handlers() :
                z.append( [ vm.get_cm_type( handler.get_type_idx() ), handler.get_addr() * 2 ] )

            exceptions.append( z )

    #print m.get_name(), exceptions
    return exceptions
class HeaderItem :
    """
    This class can parse an header_item of a dex file

    :param buff: a string which represents a Buff object of the header_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, size, buff, cm) :
        self.__CM = cm

        self.offset = buff.get_idx()

        # fixed layout: u8 magic, 4-byte checksum, 20-byte signature, then
        # twenty little-endian u4 size/offset fields
        self.magic = unpack("=Q", buff.read(8))[0]
        # read signed ('=i'); get_obj() packs it back with the same format
        self.checksum = unpack("=i", buff.read(4))[0]
        self.signature = unpack("=20s", buff.read(20))[0]
        self.file_size = unpack("=I", buff.read(4))[0]
        self.header_size = unpack("=I", buff.read(4))[0]
        self.endian_tag = unpack("=I", buff.read(4))[0]
        self.link_size = unpack("=I", buff.read(4))[0]
        self.link_off = unpack("=I", buff.read(4))[0]
        self.map_off = unpack("=I", buff.read(4))[0]
        self.string_ids_size = unpack("=I", buff.read(4))[0]
        self.string_ids_off = unpack("=I", buff.read(4))[0]
        self.type_ids_size = unpack("=I", buff.read(4))[0]
        self.type_ids_off = unpack("=I", buff.read(4))[0]
        self.proto_ids_size = unpack("=I", buff.read(4))[0]
        self.proto_ids_off = unpack("=I", buff.read(4))[0]
        self.field_ids_size = unpack("=I", buff.read(4))[0]
        self.field_ids_off = unpack("=I", buff.read(4))[0]
        self.method_ids_size = unpack("=I", buff.read(4))[0]
        self.method_ids_off = unpack("=I", buff.read(4))[0]
        self.class_defs_size = unpack("=I", buff.read(4))[0]
        self.class_defs_off = unpack("=I", buff.read(4))[0]
        self.data_size = unpack("=I", buff.read(4))[0]
        self.data_off = unpack("=I", buff.read(4))[0]

        # lazy caches for the section objects resolved in get_obj()
        self.map_off_obj = None
        self.string_off_obj = None
        self.type_off_obj = None
        self.proto_off_obj = None
        self.field_off_obj = None
        self.method_off_obj = None
        self.class_off_obj = None
        self.data_off_obj = None

    def reload(self) :
        pass

    def get_obj(self) :
        """Re-resolve every section through the ClassManager so the sizes and
        offsets written out reflect the current section layout, then pack the
        header back to its binary form."""
        if self.map_off_obj == None :
            self.map_off_obj = self.__CM.get_item_by_offset( self.map_off )

        if self.string_off_obj == None :
            self.string_off_obj = self.__CM.get_item_by_offset( self.string_ids_off )

        if self.type_off_obj == None :
            self.type_off_obj = self.__CM.get_item_by_offset( self.type_ids_off )

        if self.proto_off_obj == None :
            self.proto_off_obj = self.__CM.get_item_by_offset( self.proto_ids_off )

        if self.field_off_obj == None :
            self.field_off_obj = self.__CM.get_item_by_offset( self.field_ids_off )

        if self.method_off_obj == None :
            self.method_off_obj = self.__CM.get_item_by_offset( self.method_ids_off )

        if self.class_off_obj == None :
            self.class_off_obj = self.__CM.get_item_by_offset( self.class_defs_off )

        if self.data_off_obj == None :
            self.data_off_obj = self.__CM.get_item_by_offset( self.data_off )

        self.map_off = self.map_off_obj.get_off()

        self.string_ids_size = len(self.string_off_obj)
        self.string_ids_off = self.string_off_obj[0].get_off()

        self.type_ids_size = len(self.type_off_obj.type)
        self.type_ids_off = self.type_off_obj.get_off()

        self.proto_ids_size = len(self.proto_off_obj.proto)
        self.proto_ids_off = self.proto_off_obj.get_off()

        self.field_ids_size = len(self.field_off_obj.elem)
        self.field_ids_off = self.field_off_obj.get_off()

        self.method_ids_size = len(self.method_off_obj.methods)
        self.method_ids_off = self.method_off_obj.get_off()

        self.class_defs_size = len(self.class_off_obj.class_def)
        self.class_defs_off = self.class_off_obj.get_off()

        #self.data_size = len(self.data_off_obj)
        self.data_off = self.data_off_obj[0].get_off()

        # FIX: checksum is unpacked signed ('=i') in __init__; packing it
        # with '=I' raised struct.error for negative values.
        return pack("=Q", self.magic) + \
               pack("=i", self.checksum) + \
               pack("=20s", self.signature) + \
               pack("=I", self.file_size) + \
               pack("=I", self.header_size) + \
               pack("=I", self.endian_tag) + \
               pack("=I", self.link_size) + \
               pack("=I", self.link_off) + \
               pack("=I", self.map_off) + \
               pack("=I", self.string_ids_size) + \
               pack("=I", self.string_ids_off) + \
               pack("=I", self.type_ids_size) + \
               pack("=I", self.type_ids_off) + \
               pack("=I", self.proto_ids_size) + \
               pack("=I", self.proto_ids_off) + \
               pack("=I", self.field_ids_size) + \
               pack("=I", self.field_ids_off) + \
               pack("=I", self.method_ids_size) + \
               pack("=I", self.method_ids_off) + \
               pack("=I", self.class_defs_size) + \
               pack("=I", self.class_defs_off) + \
               pack("=I", self.data_size) + \
               pack("=I", self.data_off)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_raw())

    def show(self) :
        bytecode._PrintSubBanner("Header Item")
        bytecode._PrintDefault("magic=%s, checksum=%s, signature=%s\n" % (self.magic, self.checksum, self.signature))
        bytecode._PrintDefault("file_size=%x, header_size=%x, endian_tag=%x\n" % (self.file_size, self.header_size, self.endian_tag))
        bytecode._PrintDefault("link_size=%x, link_off=%x\n" % (self.link_size, self.link_off))
        bytecode._PrintDefault("map_off=%x\n" % (self.map_off))
        bytecode._PrintDefault("string_ids_size=%x, string_ids_off=%x\n" % (self.string_ids_size, self.string_ids_off))
        bytecode._PrintDefault("type_ids_size=%x, type_ids_off=%x\n" % (self.type_ids_size, self.type_ids_off))
        bytecode._PrintDefault("proto_ids_size=%x, proto_ids_off=%x\n" % (self.proto_ids_size, self.proto_ids_off))
        bytecode._PrintDefault("field_ids_size=%x, field_ids_off=%x\n" % (self.field_ids_size, self.field_ids_off))
        bytecode._PrintDefault("method_ids_size=%x, method_ids_off=%x\n" % (self.method_ids_size, self.method_ids_off))
        bytecode._PrintDefault("class_defs_size=%x, class_defs_off=%x\n" % (self.class_defs_size, self.class_defs_off))
        bytecode._PrintDefault("data_size=%x, data_off=%x\n" % (self.data_size, self.data_off))

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset
class AnnotationOffItem :
    """
    This class can parse an annotation_off_item of a dex file

    :param buff: a string which represents a Buff object of the annotation_off_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        # u4 absolute offset of the annotation (0 means none)
        ( self.annotation_off, ) = unpack("=I", buff.read( 4 ) )

    def show(self) :
        bytecode._PrintSubBanner("Annotation Off Item")
        bytecode._PrintDefault("annotation_off=0x%x\n" % self.annotation_off)

    def get_obj(self) :
        # patch the offset against the (possibly relocated) target object
        # before serializing
        if self.annotation_off != 0 :
            self.annotation_off = self.__CM.get_obj_by_offset( self.annotation_off ).get_off()

        return pack("=I", self.annotation_off)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_raw())
class AnnotationSetItem :
    """
    This class can parse an annotation_set_item of a dex file

    :param buff: a string which represents a Buff object of the annotation_set_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()

        # u4 entry count followed by `size` annotation_off_item structures
        ( self.size, ) = unpack("=I", buff.read( 4 ) )
        self.annotation_off_item = [ AnnotationOffItem(buff, cm) for i in xrange(0, self.size) ]

    def get_annotation_off_item(self) :
        """
        Return the offset from the start of the file to an annotation

        :rtype: a list of :class:`AnnotationOffItem`
        """
        return self.annotation_off_item

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def reload(self) :
        pass

    def show(self) :
        bytecode._PrintSubBanner("Annotation Set Item")
        for entry in self.annotation_off_item :
            entry.show()

    def get_obj(self) :
        return pack("=I", self.size)

    def get_raw(self) :
        pieces = [ self.get_obj() ]
        for entry in self.annotation_off_item :
            pieces.append( entry.get_raw() )
        return ''.join( pieces )

    def get_length(self) :
        return len(self.get_obj()) + sum( entry.get_length() for entry in self.annotation_off_item )
class AnnotationSetRefItem :
    """
    This class can parse an annotation_set_ref_item of a dex file

    :param buff: a string which represents a Buff object of the annotation_set_ref_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.annotations_off = unpack("=I", buff.read( 4 ) )[0]

    def get_annotations_off(self) :
        """
        Return the offset from the start of the file to the referenced annotation set or
        0 if there are no annotations for this element.

        :rtype: int
        """
        return self.annotations_off

    def show(self) :
        bytecode._PrintSubBanner("Annotation Set Ref Item")
        # FIX: was self.annotation_off (no such attribute -> AttributeError);
        # the attribute set in __init__ is annotations_off
        bytecode._PrintDefault("annotation_off=0x%x\n" % self.annotations_off)

    def get_obj(self) :
        # patch the offset against the relocated annotation set before packing
        if self.annotations_off != 0 :
            self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()

        return pack("=I", self.annotations_off)

    def get_raw(self) :
        return self.get_obj()
class AnnotationSetRefList:
    """
    This class can parse an annotation_set_ref_list_item of a dex file

    :param buff: a string which represents a Buff object of the annotation_set_ref_list_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.offset = buff.get_idx()

        self.__CM = cm
        # u4 entry count followed by `size` annotation_set_ref_item structures
        ( self.size, ) = unpack("=I", buff.read( 4 ) )
        self.list = [ AnnotationSetRefItem(buff, cm) for i in xrange(0, self.size) ]

    def get_list(self) :
        """
        Return elements of the list

        :rtype: :class:`AnnotationSetRefItem`
        """
        return self.list

    def get_off(self) :
        return self.offset

    def set_off(self, off) :
        self.offset = off

    def reload(self) :
        pass

    def show(self) :
        bytecode._PrintSubBanner("Annotation Set Ref List Item")
        for entry in self.list :
            entry.show()

    def get_obj(self) :
        # a shallow copy of the item list
        return list( self.list )

    def get_raw(self) :
        pieces = [ pack("=I", self.size) ]
        for entry in self.list :
            pieces.append( entry.get_raw() )
        return ''.join( pieces )

    def get_length(self) :
        return len(self.get_raw())
class FieldAnnotation :
    """
    This class can parse a field_annotation of a dex file

    :param buff: a string which represents a Buff object of the field_annotation
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.offset = buff.get_idx()

        self.__CM = cm
        self.field_idx = unpack("=I", buff.read( 4 ) )[0]
        self.annotations_off = unpack("=I", buff.read( 4 ) )[0]

    def get_field_idx(self) :
        """
        Return the index into the field_ids list for the identity of the field being annotated

        :rtype: int
        """
        # FIX: previously returned the bound method (self.get_field_idx)
        # instead of the attribute value
        return self.field_idx

    def get_annotations_off(self) :
        """
        Return the offset from the start of the file to the list of annotations for the field

        :rtype: int
        """
        return self.annotations_off

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def show(self) :
        bytecode._PrintSubBanner("Field Annotation")
        bytecode._PrintDefault( "field_idx=0x%x annotations_off=0x%x\n" % (self.field_idx, self.annotations_off) )

    def get_obj(self) :
        # patch annotations_off against the relocated annotation object
        if self.annotations_off != 0 :
            self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()

        return pack("=I", self.field_idx) + pack("=I", self.annotations_off)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_raw())
class MethodAnnotation :
    """
    This class can parse a method_annotation of a dex file

    :param buff: a string which represents a Buff object of the method_annotation
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.offset = buff.get_idx()

        self.__CM = cm
        self.method_idx = unpack("=I", buff.read( 4 ) )[0]
        self.annotations_off = unpack("=I", buff.read( 4 ) )[0]

    def get_method_idx(self) :
        """
        Return the index into the method_ids list for the identity of the method being annotated

        :rtype: int
        """
        # FIX: previously returned the bound method (self.get_method_idx)
        # instead of the attribute value
        return self.method_idx

    def get_annotations_off(self) :
        """
        Return the offset from the start of the file to the list of annotations for the method

        :rtype: int
        """
        return self.annotations_off

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def show(self) :
        bytecode._PrintSubBanner("Method Annotation")
        bytecode._PrintDefault( "method_idx=0x%x annotations_off=0x%x\n" % (self.method_idx, self.annotations_off) )

    def get_obj(self) :
        # patch annotations_off against the relocated annotation object
        if self.annotations_off != 0 :
            self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()

        return pack("=I", self.method_idx) + pack("=I", self.annotations_off)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_raw())
class ParameterAnnotation :
    """
    This class can parse a parameter_annotation of a dex file

    :param buff: a string which represents a Buff object of the parameter_annotation
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.offset = buff.get_idx()

        self.__CM = cm
        self.method_idx = unpack("=I", buff.read( 4 ) )[0]
        self.annotations_off = unpack("=I", buff.read( 4 ) )[0]

    def get_method_idx(self) :
        """
        Return the index into the method_ids list for the identity of the method whose parameters are being annotated

        :rtype: int
        """
        # FIX: previously returned the bound method (self.get_method_idx)
        # instead of the attribute value
        return self.method_idx

    def get_annotations_off(self) :
        """
        Return the offset from the start of the file to the list of annotations for the method parameters

        :rtype: int
        """
        return self.annotations_off

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def show(self) :
        bytecode._PrintSubBanner("Parameter Annotation")
        bytecode._PrintDefault( "method_idx=0x%x annotations_off=0x%x\n" % (self.method_idx, self.annotations_off) )

    def get_obj(self) :
        # patch annotations_off against the relocated annotation object
        if self.annotations_off != 0 :
            self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()

        return pack("=I", self.method_idx) + pack("=I", self.annotations_off)

    def get_raw(self):
        return self.get_obj()

    def get_length(self):
        return len(self.get_raw())
class AnnotationsDirectoryItem :
    """
    This class can parse an annotations_directory_item of a dex file

    :param buff: a string which represents a Buff object of the annotations_directory_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm

        self.offset = buff.get_idx()

        # header: class annotation offset plus the three sub-list counts
        self.class_annotations_off = unpack("=I", buff.read(4))[0]
        self.annotated_fields_size = unpack("=I", buff.read(4))[0]
        self.annotated_methods_size = unpack("=I", buff.read(4))[0]
        self.annotated_parameters_size = unpack("=I", buff.read(4))[0]

        # the three annotation lists follow the header in this fixed order
        self.field_annotations = []
        for i in xrange(0, self.annotated_fields_size) :
            self.field_annotations.append( FieldAnnotation( buff, cm ) )

        self.method_annotations = []
        for i in xrange(0, self.annotated_methods_size) :
            self.method_annotations.append( MethodAnnotation( buff, cm ) )

        self.parameter_annotations = []
        for i in xrange(0, self.annotated_parameters_size) :
            self.parameter_annotations.append( ParameterAnnotation( buff, cm ) )

    def get_class_annotations_off(self) :
        """
        Return the offset from the start of the file to the annotations made directly on the class,
        or 0 if the class has no direct annotations

        :rtype: int
        """
        return self.class_annotations_off

    def get_annotated_fields_size(self) :
        """
        Return the count of fields annotated by this item

        :rtype: int
        """
        return self.annotated_fields_size

    def get_annotated_methods_size(self) :
        """
        Return the count of methods annotated by this item

        :rtype: int
        """
        return self.annotated_methods_size

    def get_annotated_parameters_size(self) :
        """
        Return the count of method parameter lists annotated by this item

        :rtype: int
        """
        return self.annotated_parameters_size

    def get_field_annotations(self) :
        """
        Return the list of associated field annotations

        :rtype: a list of :class:`FieldAnnotation`
        """
        return self.field_annotations

    def get_method_annotations(self) :
        """
        Return the list of associated method annotations

        :rtype: a list of :class:`MethodAnnotation`
        """
        return self.method_annotations

    def get_parameter_annotations(self) :
        """
        Return the list of associated method parameter annotations

        :rtype: a list of :class:`ParameterAnnotation`
        """
        return self.parameter_annotations

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def reload(self) :
        pass

    def show(self) :
        bytecode._PrintSubBanner("Annotations Directory Item")
        bytecode._PrintDefault("class_annotations_off=0x%x annotated_fields_size=%d annotated_methods_size=%d annotated_parameters_size=%d\n" %
                                ( self.class_annotations_off,
                                  self.annotated_fields_size,
                                  self.annotated_methods_size,
                                  self.annotated_parameters_size))

        for i in self.field_annotations :
            i.show()

        for i in self.method_annotations :
            i.show()

        for i in self.parameter_annotations :
            i.show()

    def get_obj(self) :
        # patch class_annotations_off against the relocated annotation object
        # before packing the header
        if self.class_annotations_off != 0 :
            self.class_annotations_off = self.__CM.get_obj_by_offset( self.class_annotations_off ).get_off()

        return pack("=I", self.class_annotations_off) + \
               pack("=I", self.annotated_fields_size) + \
               pack("=I", self.annotated_methods_size) + \
               pack("=I", self.annotated_parameters_size)

    def get_raw(self) :
        # header then the three annotation lists, mirroring the parse order
        return self.get_obj() + \
               ''.join(i.get_raw() for i in self.field_annotations)   + \
               ''.join(i.get_raw() for i in self.method_annotations)  + \
               ''.join(i.get_raw() for i in self.parameter_annotations)

    def get_length(self) :
        length = len( self.get_obj() )
        for i in self.field_annotations :
            length += i.get_length()

        for i in self.method_annotations :
            length += i.get_length()

        for i in self.parameter_annotations :
            length += i.get_length()

        return length
class TypeItem :
    """
    This class can parse a type_item of a dex file

    :param buff: a string which represents a Buff object of the type_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        # u2 index into the type_ids table
        ( self.type_idx, ) = unpack("=H", buff.read(2))

    def get_type_idx(self) :
        """
        Return the index into the type_ids list

        :rtype: int
        """
        return self.type_idx

    def get_string(self) :
        """
        Return the type string

        :rtype: string
        """
        return self.__CM.get_type( self.type_idx )

    def show(self) :
        bytecode._PrintSubBanner("Type Item")
        bytecode._PrintDefault("type_idx=%d\n" % self.type_idx)

    def get_obj(self) :
        return pack("=H", self.type_idx)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_raw())
class TypeList :
    """
    This class can parse a type_list of a dex file

    :param buff: a string which represents a Buff object of the type_list
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()

        # type_list sections are 4-byte aligned; consume padding first.
        # NOTE(review): reading `offset % 4` bytes only reaches alignment
        # when the offset is even (offset % 4 in {0, 2}); for an odd offset
        # the correct count would be 4 - offset % 4 -- confirm offsets are
        # always even here (type items are 2-byte units).
        self.pad = ""
        if self.offset % 4 != 0 :
            self.pad = buff.read( self.offset % 4 )

        self.len_pad = len(self.pad)

        # u4 entry count followed by `size` u2 type_item entries
        self.size = unpack("=I", buff.read( 4 ) )[0]

        self.list = []
        for i in xrange(0, self.size) :
            self.list.append( TypeItem( buff, cm ) )

    def get_pad(self) :
        """
        Return the alignment string

        :rtype: string
        """
        return self.pad

    def get_type_list_off(self) :
        """
        Return the offset of the item

        :rtype: int
        """
        return self.offset + self.len_pad

    def get_string(self) :
        """
        Return the concatenation of all strings

        :rtype: string
        """
        return ' '.join(i.get_string() for i in self.list)

    def get_size(self) :
        """
        Return the size of the list, in entries

        :rtype: int
        """
        return self.size

    def get_list(self) :
        """
        Return the list of TypeItem

        :rtype: a list of :class:`TypeItem` objects
        """
        return self.list

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        # offsets reported to callers point past the alignment padding
        return self.offset + self.len_pad

    def reload(self) :
        pass

    def show(self) :
        bytecode._PrintSubBanner("Type List")
        bytecode._PrintDefault("size=%d\n" % self.size)

        for i in self.list :
            i.show()

    def get_obj(self) :
        return self.pad + pack("=I", self.size)

    def get_raw(self) :
        return self.get_obj() + ''.join(i.get_raw() for i in self.list)

    def get_length(self) :
        length = len(self.get_obj())

        for i in self.list :
            length += i.get_length()

        return length
# Opcodes of the debug_info_item state machine (dex format); operands are
# LEB128-encoded and follow each opcode byte.
DBG_END_SEQUENCE = 0x00 #    (none)  terminates a debug info sequence for a code_item
DBG_ADVANCE_PC = 0x01 #     uleb128 addr_diff       addr_diff: amount to add to address register    advances the address register without emitting a positions entry
DBG_ADVANCE_LINE = 0x02 #    sleb128 line_diff       line_diff: amount to change line register by    advances the line register without emitting a positions entry
DBG_START_LOCAL = 0x03 #   uleb128 register_num
                        #    uleb128p1 name_idx
                        #    uleb128p1 type_idx
                        #         register_num: register that will contain local name_idx: string index of the name
                        #         type_idx: type index of the type  introduces a local variable at the current address. Either name_idx or type_idx may be NO_INDEX to indicate that that value is unknown.
DBG_START_LOCAL_EXTENDED = 0x04 #   uleb128 register_num uleb128p1 name_idx uleb128p1 type_idx uleb128p1 sig_idx
                                #         register_num: register that will contain local
                                #         name_idx: string index of the name
                                #         type_idx: type index of the type
                                #         sig_idx: string index of the type signature
                                # introduces a local with a type signature at the current address. Any of name_idx, type_idx, or sig_idx may be NO_INDEX to indicate that that value is unknown. (
                                # If sig_idx is -1, though, the same data could be represented more efficiently using the opcode DBG_START_LOCAL.)
                                # Note: See the discussion under "dalvik.annotation.Signature" below for caveats about handling signatures.
DBG_END_LOCAL = 0x05 #    uleb128 register_num
                    #          register_num: register that contained local
                    #          marks a currently-live local variable as out of scope at the current address
DBG_RESTART_LOCAL = 0x06 #     uleb128 register_num
                        #           register_num: register to restart re-introduces a local variable at the current address.
                        #           The name and type are the same as the last local that was live in the specified register.
DBG_SET_PROLOGUE_END = 0x07 #     (none)  sets the prologue_end state machine register, indicating that the next position entry that is added should be considered the end of a
                            #             method prologue (an appropriate place for a method breakpoint). The prologue_end register is cleared by any special (>= 0x0a) opcode.
DBG_SET_EPILOGUE_BEGIN = 0x08 #    (none)  sets the epilogue_begin state machine register, indicating that the next position entry that is added should be considered the beginning
                            #              of a method epilogue (an appropriate place to suspend execution before method exit). The epilogue_begin register is cleared by any special (>= 0x0a) opcode.
DBG_SET_FILE = 0x09 #   uleb128p1 name_idx
                    #       name_idx: string index of source file name; NO_INDEX if unknown indicates that all subsequent line number entries make reference to this source file name,
                    #       instead of the default name specified in code_item
DBG_Special_Opcodes_BEGIN = 0x0a #    (none)  advances the line and address registers, emits a position entry, and clears prologue_end and epilogue_begin. See below for description.
DBG_Special_Opcodes_END = 0xff
DBG_LINE_BASE = -4
DBG_LINE_RANGE = 15
class DBGBytecode :
    """One opcode of a debug_info_item bytecode stream plus its operands."""
    def __init__(self, cm, op_value) :
        self.CM = cm
        self.op_value = op_value
        # list of (value, ttype) operand tuples; ttype is "u" (uleb128),
        # "s" (sleb128) or "u1" (uleb128p1) -- see DebugInfoItem
        self.format = []

    def get_op_value(self) :
        return self.op_value

    def add(self, value, ttype) :
        self.format.append( (value, ttype) )

    def get_value(self) :
        # for DBG_START_LOCAL(_EXTENDED) the second operand is name_idx
        if self.get_op_value() == DBG_START_LOCAL :
            return self.CM.get_string(self.format[1][0])
        elif self.get_op_value() == DBG_START_LOCAL_EXTENDED :
            return self.CM.get_string(self.format[1][0])

        return None

    def show(self) :
        bytecode._PrintSubBanner("DBGBytecode")
        bytecode._PrintDefault("op_value=%x format=%s value=%s\n" % (self.op_value, str(self.format), self.get_value()))

    def get_obj(self) :
        return []

    def get_raw(self) :
        # NOTE(review): op_value is stored as a plain int (see DebugInfoItem's
        # unpack("=B", ...)), which has no get_value_buff() method -- this
        # looks broken; confirm whether pack("=B", self.op_value) was intended.
        # Also note "u1" (uleb128p1) operands are silently skipped here.
        buff = self.op_value.get_value_buff()
        for i in self.format :
            if i[1] == "u" :
                buff += writeuleb128( i[0] )
            elif i[1] == "s" :
                buff += writesleb128( i[0] )
        return buff
class DebugInfoItem :
    """
    Parser for a debug_info_item: uleb128 line_start, uleb128 parameters_size,
    `parameters_size` parameter names encoded as uleb128p1, then a stream of
    DBG_* opcodes terminated by DBG_END_SEQUENCE.
    """
    def __init__(self, buff, cm) :
        self.CM = cm

        self.offset = buff.get_idx()

        self.line_start = readuleb128( buff )
        self.parameters_size = readuleb128( buff )

        #print "line", self.line_start, "params", self.parameters_size

        # parameter names are uleb128p1: -1 (NO_INDEX) means unnamed
        self.parameter_names = []
        for i in xrange(0, self.parameters_size) :
            self.parameter_names.append( readuleb128p1( buff ) )

        self.bytecodes = []
        bcode = DBGBytecode( self.CM, unpack("=B", buff.read(1))[0] )
        self.bytecodes.append( bcode )

        while bcode.get_op_value() != DBG_END_SEQUENCE :
            bcode_value = bcode.get_op_value()

            if bcode_value == DBG_ADVANCE_PC :
                bcode.add( readuleb128( buff ), "u" )
            elif bcode_value == DBG_ADVANCE_LINE :
                bcode.add( readsleb128( buff ), "s" )
            elif bcode_value == DBG_START_LOCAL :
                # FIX: was readusleb128 (undefined name -> NameError); the
                # register_num operand is a plain uleb128
                bcode.add( readuleb128( buff ), "u" )
                bcode.add( readuleb128p1( buff ), "u1" )
                bcode.add( readuleb128p1( buff ), "u1" )
            elif bcode_value == DBG_START_LOCAL_EXTENDED :
                bcode.add( readuleb128( buff ), "u" )
                bcode.add( readuleb128p1( buff ), "u1" )
                bcode.add( readuleb128p1( buff ), "u1" )
                bcode.add( readuleb128p1( buff ), "u1" )
            elif bcode_value == DBG_END_LOCAL :
                bcode.add( readuleb128( buff ), "u" )
            elif bcode_value == DBG_RESTART_LOCAL :
                bcode.add( readuleb128( buff ), "u" )
            elif bcode_value == DBG_SET_PROLOGUE_END :
                pass
            elif bcode_value == DBG_SET_EPILOGUE_BEGIN :
                pass
            elif bcode_value == DBG_SET_FILE :
                bcode.add( readuleb128p1( buff ), "u1" )
            else : # special opcodes (>= DBG_Special_Opcodes_BEGIN) have no operands
                pass

            bcode = DBGBytecode( self.CM, unpack("=B", buff.read(1))[0] )
            self.bytecodes.append( bcode )

    def reload(self) :
        pass

    def get_parameters_size(self) :
        return self.parameters_size

    def get_line_start(self) :
        return self.line_start

    def get_parameter_names(self) :
        return self.parameter_names

    def get_translated_parameter_names(self) :
        # resolve name indexes through the ClassManager; -1 means unnamed
        l = []
        for i in self.parameter_names :
            if i == -1 :
                l.append( None )
            else :
                l.append( self.CM.get_string( i ) )
        return l

    def get_bytecodes(self) :
        return self.bytecodes

    def show(self) :
        bytecode._PrintSubBanner("Debug Info Item")
        bytecode._PrintDefault("line_start=%d parameters_size=%d\n" % (self.line_start, self.parameters_size))
        nb = 0
        for i in self.parameter_names :
            bytecode._PrintDefault("parameter_names[%d]=%s\n" % (nb, self.CM.get_string( i )))
            nb += 1

        for i in self.bytecodes :
            i.show()

    def get_raw(self) :
        # FIX: was self.__offset, which (after name mangling) is never
        # assigned -- the attribute set in __init__ is self.offset.
        # FIX: parameter names were decoded as uleb128p1 (stored value - 1),
        # so they must be re-encoded as uleb128 of (value + 1).
        return [ bytecode.Buff( self.offset, writeuleb128( self.line_start ) + \
                                             writeuleb128( self.parameters_size ) + \
                                             ''.join(writeuleb128(i + 1) for i in self.parameter_names) + \
                                             ''.join(i.get_raw() for i in self.bytecodes) ) ]

    def get_off(self) :
        return self.offset
# value_type constants of an encoded_value (dex format); the comment on each
# line describes the value_arg interpretation and payload.
VALUE_BYTE = 0x00 # (none; must be 0)      ubyte[1]         signed one-byte integer value
VALUE_SHORT = 0x02 # size - 1 (0..1)  ubyte[size]    signed two-byte integer value, sign-extended
VALUE_CHAR = 0x03 # size - 1 (0..1)  ubyte[size]    unsigned two-byte integer value, zero-extended
VALUE_INT = 0x04 #  size - 1 (0..3)  ubyte[size]    signed four-byte integer value, sign-extended
VALUE_LONG = 0x06 # size - 1 (0..7)  ubyte[size]    signed eight-byte integer value, sign-extended
VALUE_FLOAT = 0x10 # size - 1 (0..3)  ubyte[size]    four-byte bit pattern, zero-extended to the right, and interpreted as an IEEE754 32-bit floating point value
VALUE_DOUBLE = 0x11 # size - 1 (0..7)  ubyte[size]    eight-byte bit pattern, zero-extended to the right, and interpreted as an IEEE754 64-bit floating point value
VALUE_STRING = 0x17 # size - 1 (0..3)  ubyte[size]    unsigned (zero-extended) four-byte integer value, interpreted as an index into the string_ids section and representing a string value
VALUE_TYPE = 0x18 # size - 1 (0..3)  ubyte[size]    unsigned (zero-extended) four-byte integer value, interpreted as an index into the type_ids section and representing a reflective type/class value
VALUE_FIELD = 0x19 # size - 1 (0..3)  ubyte[size]    unsigned (zero-extended) four-byte integer value, interpreted as an index into the field_ids section and representing a reflective field value
VALUE_METHOD = 0x1a # size - 1 (0..3)  ubyte[size]    unsigned (zero-extended) four-byte integer value, interpreted as an index into the method_ids section and representing a reflective method value
VALUE_ENUM = 0x1b # size - 1 (0..3)  ubyte[size]    unsigned (zero-extended) four-byte integer value, interpreted as an index into the field_ids section and representing the value of an enumerated type constant
VALUE_ARRAY = 0x1c # (none; must be 0)      encoded_array  an array of values, in the format specified by "encoded_array Format" below. The size of the value is implicit in the encoding.
VALUE_ANNOTATION = 0x1d # (none; must be 0)      encoded_annotation     a sub-annotation, in the format specified by "encoded_annotation Format" below. The size of the value is implicit in the encoding.
VALUE_NULL = 0x1e # (none; must be 0)      (none)  null reference value
VALUE_BOOLEAN = 0x1f # boolean (0..1) (none)  one-bit value; 0 for false and 1 for true. The bit is represented in the value_arg.
class DebugInfoItemEmpty :
    """
    Placeholder for a debug_info_item kept as an opaque byte blob:
    reload() snapshots the raw bytes between this item's offset and the
    next item known to the ClassManager.
    """
    def __init__(self, buff, cm) :
        self.__CM = cm

        self.offset = buff.get_idx()
        self.__buff = buff
        self.__raw = ""

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def reload(self) :
        start = self.offset
        n = self.__CM.get_next_offset_item( start )

        # snapshot the raw bytes without disturbing the buffer position
        saved_idx = self.__buff.get_idx()
        self.__buff.set_idx( start )
        self.__raw = self.__buff.read( n - start )
        self.__buff.set_idx( saved_idx )

    def show(self) :
        pass

    def get_obj(self) :
        return []

    def get_raw(self) :
        return self.__raw

    def get_length(self) :
        return len(self.__raw)
class EncodedArray :
    """
    This class can parse an encoded_array of a dex file

    :param buff: a string which represents a Buff object of the encoded_array
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()

        # uleb128 element count followed by `size` encoded_value entries
        self.size = readuleb128( buff )
        self.values = [ EncodedValue(buff, cm) for i in xrange(0, self.size) ]

    def get_size(self) :
        """
        Return the number of elements in the array

        :rtype: int
        """
        return self.size

    def get_values(self) :
        """
        Return a series of size encoded_value byte sequences in the format specified by this section,
        concatenated sequentially

        :rtype: a list of :class:`EncodedValue` objects
        """
        return self.values

    def show(self) :
        bytecode._PrintSubBanner("Encoded Array")
        bytecode._PrintDefault("size=%d\n" % self.size)

        for value in self.values :
            value.show()

    def get_obj(self) :
        return writeuleb128( self.size )

    def get_raw(self) :
        pieces = [ self.get_obj() ]
        for value in self.values :
            pieces.append( value.get_raw() )
        return ''.join( pieces )

    def get_length(self) :
        return len(self.get_obj()) + sum( value.get_length() for value in self.values )
class EncodedValue :
    """
    This class can parse an encoded_value of a dex file

    :param buff: a string which represents a Buff object of the encoded_value
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        # Header byte: low 5 bits = value type tag, high 3 bits = value_arg
        # (for the integer-like types below, value_arg + 1 is the byte count).
        self.val = unpack("=B", buff.read(1))[0]
        self.value_arg = self.val >> 5
        self.value_type = self.val & 0x1f
        # raw_value keeps the undecoded bytes (when applicable) so get_raw()
        # can round-trip the original encoding; value holds the decoded form.
        self.raw_value = None
        self.value = ""
        # TODO: parse floats/doubles correctly
        if self.value_type >= VALUE_SHORT and self.value_type < VALUE_STRING :
            # Numeric types: little-endian integer of value_arg + 1 bytes.
            self.value, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
        elif self.value_type == VALUE_STRING :
            # Index types: the integer is an index resolved via the ClassManager.
            id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
            self.value = cm.get_raw_string(id)
        elif self.value_type == VALUE_TYPE :
            id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
            self.value = cm.get_type(id)
        elif self.value_type == VALUE_FIELD :
            id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
            self.value = cm.get_field(id)
        elif self.value_type == VALUE_METHOD :
            id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
            self.value = cm.get_method(id)
        elif self.value_type == VALUE_ENUM :
            # Enum constants are encoded as a field reference.
            id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
            self.value = cm.get_field(id)
        elif self.value_type == VALUE_ARRAY :
            # Nested aggregate values parse recursively from the buffer.
            self.value = EncodedArray( buff, cm )
        elif self.value_type == VALUE_ANNOTATION :
            self.value = EncodedAnnotation( buff, cm )
        elif self.value_type == VALUE_BYTE :
            self.value = buff.read( 1 )
        elif self.value_type == VALUE_NULL :
            # No payload bytes at all.
            self.value = None
        elif self.value_type == VALUE_BOOLEAN :
            # The boolean payload lives in value_arg itself (see VALUE_BOOLEAN).
            if self.value_arg:
                self.value = True
            else:
                self.value = False
        else :
            bytecode.Exit( "Unknown value 0x%x" % self.value_type )

    def get_value(self) :
        """
        Return the bytes representing the value, variable in length and interpreted differently for different value_type bytes,
        though always little-endian

        :rtype: an object representing the value
        """
        return self.value

    def get_value_type(self) :
        # Low-5-bit type tag from the header byte.
        return self.value_type

    def get_value_arg(self) :
        # High-3-bit argument from the header byte.
        return self.value_arg

    def _getintvalue(self, buf):
        # Decode buf as an unsigned little-endian integer; also return the
        # original bytes so serialization can reproduce them verbatim.
        ret = 0
        shift = 0
        for b in buf:
            ret |= ord(b) << shift
            shift += 8
        return ret, buf

    def show(self) :
        bytecode._PrintSubBanner("Encoded Value")
        bytecode._PrintDefault("val=%x value_arg=%x value_type=%x\n" % (self.val, self.value_arg, self.value_type))

    def get_obj(self) :
        # Nested objects (EncodedArray/EncodedAnnotation/...) are exposed for
        # the map-style serialization walk; plain strings are not.
        if isinstance(self.value, str) == False :
            return [ self.value ]
        return []

    def get_raw(self) :
        # Prefer the captured raw bytes to guarantee byte-exact round-trips.
        if self.raw_value == None :
            return pack("=B", self.val) + bytecode.object_to_str( self.value )
        else :
            return pack("=B", self.val) + bytecode.object_to_str( self.raw_value )

    def get_length(self) :
        if self.raw_value == None :
            return len(pack("=B", self.val)) + len(bytecode.object_to_str( self.value ))
        else :
            return len(pack("=B", self.val)) + len(bytecode.object_to_str( self.raw_value ))
class AnnotationElement :
    """
    This class can parse an annotation_element of a dex file

    :param buff: a string which represents a Buff object of the annotation_element
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        # uleb128 string index, immediately followed by the encoded_value.
        self.name_idx = readuleb128( buff )
        self.value = EncodedValue( buff, cm )

    def get_name_idx(self) :
        """
        Return the element name, represented as an index into the string_ids section

        :rtype: int
        """
        return self.name_idx

    def get_value(self) :
        """
        Return the element value (EncodedValue)

        :rtype: a :class:`EncodedValue` object
        """
        return self.value

    def show(self) :
        bytecode._PrintSubBanner("Annotation Element")
        bytecode._PrintDefault("name_idx=%d\n" % self.name_idx)
        self.value.show()

    def get_obj(self) :
        return writeuleb128(self.name_idx)

    def get_raw(self) :
        return self.get_obj() + self.value.get_raw()

    def get_length(self) :
        return len(self.get_obj()) + self.value.get_length()
class EncodedAnnotation :
    """
    This class can parse an encoded_annotation of a dex file

    :param buff: a string which represents a Buff object of the encoded_annotation
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        # uleb128 type index and element count, then `size` annotation_elements.
        self.type_idx = readuleb128( buff )
        self.size = readuleb128( buff )
        self.elements = [ AnnotationElement( buff, cm ) for _ in xrange(self.size) ]

    def get_type_idx(self) :
        """
        Return the type of the annotation. This must be a class (not array or primitive) type

        :rtype: int
        """
        return self.type_idx

    def get_size(self) :
        """
        Return the number of name-value mappings in this annotation

        :rtype: int
        """
        return self.size

    def get_elements(self) :
        """
        Return the elements of the annotation, represented directly in-line (not as offsets)

        :rtype: a list of :class:`AnnotationElement` objects
        """
        return self.elements

    def show(self) :
        bytecode._PrintSubBanner("Encoded Annotation")
        bytecode._PrintDefault("type_idx=%d size=%d\n" % (self.type_idx, self.size))
        for element in self.elements :
            element.show()

    def get_obj(self) :
        return list(self.elements)

    def get_raw(self) :
        return writeuleb128(self.type_idx) + writeuleb128(self.size) + ''.join(element.get_raw() for element in self.elements)

    def get_length(self) :
        return len(writeuleb128(self.type_idx) + writeuleb128(self.size)) + sum(element.get_length() for element in self.elements)
class AnnotationItem :
    """
    This class can parse an annotation_item of a dex file

    :param buff: a string which represents a Buff object of the annotation_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        # Single visibility byte, then the encoded_annotation payload.
        self.visibility = unpack("=B", buff.read(1))[0]
        self.annotation = EncodedAnnotation(buff, cm)

    def get_visibility(self) :
        """
        Return the intended visibility of this annotation

        :rtype: int
        """
        return self.visibility

    def get_annotation(self) :
        """
        Return the encoded annotation contents

        :rtype: a :class:`EncodedAnnotation` object
        """
        return self.annotation

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def reload(self) :
        pass

    def show(self) :
        bytecode._PrintSubBanner("Annotation Item")
        bytecode._PrintDefault("visibility=%d\n" % self.visibility)
        self.annotation.show()

    def get_obj(self) :
        return [ self.annotation ]

    def get_raw(self) :
        return pack("=B", self.visibility) + self.annotation.get_raw()

    def get_length(self) :
        return len(pack("=B", self.visibility)) + self.annotation.get_length()
class EncodedArrayItem :
    """
    This class can parse an encoded_array_item of a dex file

    :param buff: a string which represents a Buff object of the encoded_array_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        self.value = EncodedArray( buff, cm )

    def get_value(self) :
        """
        Return the bytes representing the encoded array value

        :rtype: a :class:`EncodedArray` object
        """
        # BUGFIX: an undocumented duplicate definition of get_value() used to
        # shadow this one; the duplicate was removed (behavior unchanged).
        return self.value

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def reload(self) :
        pass

    def show(self) :
        bytecode._PrintSubBanner("Encoded Array Item")
        self.value.show()

    def get_obj(self) :
        return [ self.value ]

    def get_raw(self) :
        return self.value.get_raw()

    def get_length(self) :
        return self.value.get_length()
def utf8_to_string(buff, length):
    """
    Decode `length` UTF-16 code units of MUTF-8 data from `buff`.

    Malformed input (an embedded zero byte, a bad continuation byte, or a
    non-shortest-form sequence) only triggers a warning; decoding continues
    with whatever bits were read.

    :param buff: Buff-like object positioned at the start of the string data
    :param length: number of code units to decode (the utf16_size field)
    :rtype: the decoded characters joined into a single string
    """
    chars = []
    for _ in xrange(length):
        first_char = ord(buff.read(1))
        # The high nibble of the first byte selects the sequence length.
        value = first_char >> 4
        if value in (0x00, 0x01, 0x02, 0x03,
                     0x04, 0x05, 0x06, 0x07):
            # One-byte sequence (0xxxxxxx). In MUTF-8 a raw 0x00 is illegal:
            # U+0000 must be encoded as the two-byte form 0xC0 0x80.
            if first_char == 0:
                warning('at offset %x: single zero byte illegal' % buff.get_idx())
            chars.append(chr(first_char))
        elif value in (0x0c, 0x0d):
            # Two-byte sequence (110xxxxx 10xxxxxx).
            second_char = ord(buff.read(1))
            if (second_char & 0xc0) != 0x80:
                warning('bad utf8 at offset: %x' % buff.get_idx())
            value = ((first_char & 0x1f) << 6) | (second_char & 0x3f)
            # value == 0 is the legal MUTF-8 encoding of U+0000, so it is
            # exempt from the shortest-form check.
            if value != 0 and value < 0x80:
                warning('at offset %x: utf8 should have been represented with one byte encoding' % buff.get_idx())
            chars.append(unichr(value))
        elif value == 0x0e:
            # Three-byte sequence (1110xxxx 10xxxxxx 10xxxxxx).
            second_char = ord(buff.read(1))
            if second_char & 0xc0 != 0x80:
                warning('bad utf8 byte %x at offset %x' % (second_char, buff.get_idx()))
            third_char = ord(buff.read(1))
            if third_char & 0xc0 != 0x80:
                warning('bad utf8 byte %x at offset %x' % (third_char, buff.get_idx()))
            value = ((first_char & 0x0f) << 12) | ((second_char & 0x3f) << 6) | (third_char & 0x3f)
            if value < 0x800:
                warning('at offset %x: utf8 should have been represented with two-byte encoding' % buff.get_idx())
            chars.append(unichr(value))
        else:
            # High nibble 0x8..0xb (lone continuation byte) or 0xf: illegal
            # lead byte; nothing is appended for this code unit.
            warning('at offset %x: illegal utf8' % buff.get_idx())
    return ''.join(chars)
class StringDataItem :
    """
    This class can parse a string_data_item of a dex file

    :param buff: a string which represents a Buff object of the string_data_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        self.utf16_size = readuleb128( buff )
        self.data = utf8_to_string(buff, self.utf16_size)
        # string_data_item is NUL-terminated; consume and verify the terminator.
        expected = buff.read(1)
        if expected != '\x00':
            # BUGFIX: `expected` is a one-character string, and formatting it
            # with %x raised TypeError, so this warning could never be emitted.
            warning('\x00 expected at offset: %x, found: %s' % (buff.get_idx(), repr(expected)))

    def get_utf16_size(self) :
        """
        Return the size of this string, in UTF-16 code units

        :rtype: int
        """
        return self.utf16_size

    def get_data(self) :
        """
        Return a series of MUTF-8 code units (a.k.a. octets, a.k.a. bytes) followed by a byte of value 0

        :rtype: string
        """
        return self.data

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def reload(self) :
        pass

    def get(self) :
        return self.data

    def show(self) :
        bytecode._PrintSubBanner("String Data Item")
        bytecode._PrintDefault("utf16_size=%d data=%s\n" % (self.utf16_size, repr( self.data )))

    def get_obj(self) :
        return []

    def get_raw(self) :
        # NOTE(review): the terminating '\x00' consumed in __init__ is not
        # re-emitted here, and self.data is the *decoded* string rather than
        # the original MUTF-8 bytes — confirm round-trip expectations upstream.
        return writeuleb128( self.utf16_size ) + self.data

    def get_length(self) :
        return len(writeuleb128( self.utf16_size )) + len(self.data)
class StringIdItem :
    """
    This class can parse a string_id_item of a dex file

    :param buff: a string which represents a Buff object of the string_id_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        # Single 32-bit file offset pointing at the string_data_item.
        self.string_data_off = unpack("=I", buff.read(4))[0]

    def get_string_data_off(self):
        """
        Return the offset from the start of the file to the string data for this item

        :rtype: int
        """
        return self.string_data_off

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def reload(self) :
        pass

    def show(self) :
        bytecode._PrintSubBanner("String Id Item")
        bytecode._PrintDefault("string_data_off=%x\n" % self.string_data_off)

    def get_obj(self) :
        # On re-serialization, rewrite the pointer to the (possibly moved)
        # string data's current offset.
        if self.string_data_off != 0 :
            self.string_data_off = self.__CM.get_string_by_offset( self.string_data_off ).get_off()
        return pack("=I", self.string_data_off)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_obj())
class TypeIdItem :
    """
    This class can parse a type_id_item of a dex file

    :param buff: a string which represents a Buff object of the type_id_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        # 32-bit index into string_ids; the string itself is resolved lazily.
        self.descriptor_idx = unpack("=I", buff.read( 4 ) )[0]
        self.descriptor_idx_value = None

    def reload(self) :
        self.descriptor_idx_value = self.__CM.get_string( self.descriptor_idx )

    def get_descriptor_idx(self) :
        """
        Return the index into the string_ids list for the descriptor string of this type

        :rtype: int
        """
        return self.descriptor_idx

    def get_descriptor_idx_value(self) :
        """
        Return the string associated to the descriptor

        :rtype: string
        """
        return self.descriptor_idx_value

    def show(self) :
        bytecode._PrintSubBanner("Type Id Item")
        bytecode._PrintDefault("descriptor_idx=%d descriptor_idx_value=%s\n" % (self.descriptor_idx, self.descriptor_idx_value))

    def get_obj(self) :
        return pack("=I", self.descriptor_idx)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_obj())
class TypeHIdItem :
    """
    This class can parse a list of type_id_item of a dex file

    :param buff: a string which represents a Buff object of the list of type_id_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, size, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        self.type = [ TypeIdItem( buff, cm ) for _ in xrange(size) ]

    def get_type(self) :
        """
        Return the list of type_id_item

        :rtype: a list of :class:`TypeIdItem` objects
        """
        return self.type

    def get(self, idx) :
        # Out-of-range lookups report -1 rather than raising.
        try :
            return self.type[ idx ].get_descriptor_idx()
        except IndexError :
            return -1

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def reload(self) :
        for item in self.type :
            item.reload()

    def show(self) :
        bytecode._PrintSubBanner("Type List Item")
        for item in self.type :
            item.show()

    def get_obj(self) :
        return list(self.type)

    def get_raw(self) :
        return ''.join(item.get_raw() for item in self.type)

    def get_length(self) :
        return sum(item.get_length() for item in self.type)
class ProtoIdItem :
    """
    This class can parse a proto_id_item of a dex file

    :param buff: a string which represents a Buff object of the proto_id_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        # Three 32-bit fields, resolved lazily by reload().
        self.shorty_idx = unpack("=I", buff.read(4))[0]
        self.return_type_idx = unpack("=I", buff.read(4))[0]
        self.parameters_off = unpack("=I", buff.read(4))[0]
        self.shorty_idx_value = None
        self.return_type_idx_value = None
        self.parameters_off_value = None

    def reload(self) :
        self.shorty_idx_value = self.__CM.get_string( self.shorty_idx )
        self.return_type_idx_value = self.__CM.get_type( self.return_type_idx )
        self.parameters_off_value = self.__CM.get_type_list( self.parameters_off )

    def get_shorty_idx(self) :
        """
        Return the index into the string_ids list for the short-form descriptor string of this prototype

        :rtype: int
        """
        return self.shorty_idx

    def get_return_type_idx(self) :
        """
        Return the index into the type_ids list for the return type of this prototype

        :rtype: int
        """
        return self.return_type_idx

    def get_parameters_off(self) :
        """
        Return the offset from the start of the file to the list of parameter types for this prototype, or 0 if this prototype has no parameters

        :rtype: int
        """
        return self.parameters_off

    def get_shorty_idx_value(self) :
        """
        Return the string associated to the shorty_idx

        :rtype: string
        """
        return self.shorty_idx_value

    def get_return_type_idx_value(self) :
        """
        Return the string associated to the return_type_idx

        :rtype: string
        """
        return self.return_type_idx_value

    def get_parameters_off_value(self) :
        """
        Return the string associated to the parameters_off

        :rtype: string
        """
        return self.parameters_off_value

    def show(self) :
        bytecode._PrintSubBanner("Proto Item")
        bytecode._PrintDefault("shorty_idx=%d return_type_idx=%d parameters_off=%d\n" % (self.shorty_idx, self.return_type_idx, self.parameters_off))
        bytecode._PrintDefault("shorty_idx_value=%s return_type_idx_value=%s parameters_off_value=%s\n" %
                               (self.shorty_idx_value, self.return_type_idx_value, self.parameters_off_value))

    def get_obj(self) :
        # On re-serialization, refresh the parameters pointer to the moved
        # type list's current offset (0 means "no parameters" and stays 0).
        if self.parameters_off != 0 :
            self.parameters_off = self.__CM.get_obj_by_offset( self.parameters_off ).get_off()
        return pack("=I", self.shorty_idx) + pack("=I", self.return_type_idx) + pack("=I", self.parameters_off)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_obj())
class ProtoHIdItem :
    """
    This class can parse a list of proto_id_item of a dex file

    :param buff: a string which represents a Buff object of the list of proto_id_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, size, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        self.proto = [ ProtoIdItem(buff, cm) for _ in xrange(size) ]

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def get(self, idx) :
        # Out-of-range lookups yield a harmless placeholder object.
        try :
            return self.proto[ idx ]
        except IndexError :
            return ProtoIdItemInvalid()

    def reload(self) :
        for item in self.proto :
            item.reload()

    def show(self) :
        bytecode._PrintSubBanner("Proto List Item")
        for item in self.proto :
            item.show()

    def get_obj(self) :
        return list(self.proto)

    def get_raw(self) :
        return ''.join(item.get_raw() for item in self.proto)

    def get_length(self) :
        return sum(item.get_length() for item in self.proto)
class FieldIdItem :
    """
    This class can parse a field_id_item of a dex file

    :param buff: a string which represents a Buff object of the field_id_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        # Two 16-bit indexes and one 32-bit index, resolved lazily by reload().
        self.class_idx = unpack("=H", buff.read(2))[0]
        self.type_idx = unpack("=H", buff.read(2))[0]
        self.name_idx = unpack("=I", buff.read(4))[0]
        self.class_idx_value = None
        self.type_idx_value = None
        self.name_idx_value = None

    def reload(self) :
        self.class_idx_value = self.__CM.get_type( self.class_idx )
        self.type_idx_value = self.__CM.get_type( self.type_idx )
        self.name_idx_value = self.__CM.get_string( self.name_idx )

    def get_class_idx(self) :
        """
        Return the index into the type_ids list for the definer of this field

        :rtype: int
        """
        return self.class_idx

    def get_type_idx(self) :
        """
        Return the index into the type_ids list for the type of this field

        :rtype: int
        """
        return self.type_idx

    def get_name_idx(self) :
        """
        Return the index into the string_ids list for the name of this field

        :rtype: int
        """
        return self.name_idx

    def get_class_name(self) :
        """
        Return the class name of the field

        :rtype: string
        """
        return self.class_idx_value

    def get_type(self) :
        """
        Return the type of the field

        :rtype: string
        """
        return self.type_idx_value

    def get_descriptor(self) :
        """
        Return the descriptor of the field

        :rtype: string
        """
        return self.type_idx_value

    def get_name(self) :
        """
        Return the name of the field

        :rtype: string
        """
        return self.name_idx_value

    def get_list(self) :
        return [ self.get_class_name(), self.get_type(), self.get_name() ]

    def show(self) :
        bytecode._PrintSubBanner("Field Id Item")
        bytecode._PrintDefault("class_idx=%d type_idx=%d name_idx=%d\n" % (self.class_idx, self.type_idx, self.name_idx))
        bytecode._PrintDefault("class_idx_value=%s type_idx_value=%s name_idx_value=%s\n" % (self.class_idx_value, self.type_idx_value, self.name_idx_value))

    def get_obj(self) :
        return pack("=H", self.class_idx) + pack("=H", self.type_idx) + pack("=I", self.name_idx)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_obj())
class FieldHIdItem :
"""
This class can parse a list of field_id_item of a dex file
:param buff: a string which represents a Buff object of the list of field_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm) :
self.offset = buff.get_idx()
self.elem = []
for i in xrange(0, size) :
self.elem.append( FieldIdItem(buff, cm) )
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def gets(self) :
return self.elem
def get(self, idx) :
try :
return self.elem[ idx ]
except IndexError :
return FieldIdItemInvalid()
def reload(self) :
for i in self.elem :
i.reload()
def show(self) :
nb = 0
for i in self.elem :
print nb,
i.show()
nb = nb + 1
def get_obj(self) :
return [ i for i in self.elem ]
def get_raw(self) :
return ''.join(i.get_raw() for i in self.elem)
def get_length(self) :
length = 0
for i in self.elem :
length += i.get_length()
return length
class MethodIdItem :
    """
    This class can parse a method_id_item of a dex file

    :param buff: a string which represents a Buff object of the method_id_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        self.offset = buff.get_idx()
        self.class_idx = unpack("=H", buff.read(2))[0]
        self.proto_idx = unpack("=H", buff.read(2))[0]
        self.name_idx = unpack("=I", buff.read(4))[0]
        self.class_idx_value = None
        self.proto_idx_value = None
        self.name_idx_value = None

    def reload(self) :
        self.class_idx_value = self.__CM.get_type( self.class_idx )
        self.proto_idx_value = self.__CM.get_proto( self.proto_idx )
        self.name_idx_value = self.__CM.get_string( self.name_idx )

    def get_class_idx(self) :
        """
        Return the index into the type_ids list for the definer of this method

        :rtype: int
        """
        return self.class_idx

    def get_proto_idx(self) :
        """
        Return the index into the proto_ids list for the prototype of this method

        :rtype: int
        """
        return self.proto_idx

    def get_name_idx(self) :
        """
        Return the index into the string_ids list for the name of this method

        :rtype: int
        """
        return self.name_idx

    def get_class_name(self) :
        """
        Return the class name of the method

        :rtype: string
        """
        return self.class_idx_value

    def get_proto(self) :
        """
        Return the prototype of the method

        :rtype: string
        """
        return self.proto_idx_value

    def get_descriptor(self) :
        """
        Return the descriptor

        :rtype: string
        """
        proto = self.get_proto()
        return proto[0] + proto[1]

    def get_name(self) :
        """
        Return the name of the method

        :rtype: string
        """
        return self.name_idx_value

    def get_list(self) :
        return [ self.get_class_name(), self.get_name(), self.get_proto() ]

    def show(self) :
        bytecode._PrintSubBanner("Method Id Item")
        bytecode._PrintDefault("class_idx=%d proto_idx=%d name_idx=%d\n" % (self.class_idx, self.proto_idx, self.name_idx))
        bytecode._PrintDefault("class_idx_value=%s proto_idx_value=%s name_idx_value=%s\n" % (self.class_idx_value, self.proto_idx_value, self.name_idx_value))

    def get_obj(self) :
        # FIX: use '=' (standard size, no native alignment) like every other
        # id-item's get_obj; the bare "H"/"I" formats depended on platform
        # native sizes. Emitted bytes are unchanged on standard platforms.
        return pack("=H", self.class_idx) + pack("=H", self.proto_idx) + pack("=I", self.name_idx)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_obj())
class MethodHIdItem :
"""
This class can parse a list of method_id_item of a dex file
:param buff: a string which represents a Buff object of the list of method_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.methods = []
for i in xrange(0, size) :
self.methods.append( MethodIdItem(buff, cm) )
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def get(self, idx) :
try :
return self.methods[ idx ]
except IndexError :
return MethodIdItemInvalid()
def reload(self) :
for i in self.methods :
i.reload()
def show(self) :
print "METHOD_ID_ITEM"
nb = 0
for i in self.methods :
print nb,
i.show()
nb = nb + 1
def get_obj(self) :
return [ i for i in self.methods ]
def get_raw(self) :
return ''.join(i.get_raw() for i in self.methods)
def get_length(self) :
length = 0
for i in self.methods :
length += i.get_length()
return length
class ProtoIdItemInvalid :
def get_params(self) :
return "AG:IPI:invalid_params;"
def get_shorty(self) :
return "(AG:IPI:invalid_shorty)"
def get_return_type(self) :
return "(AG:IPI:invalid_return_type)"
def show(self) :
print "AG:IPI:invalid_proto_item", self.get_shorty(), self.get_return_type(), self.get_params()
class FieldIdItemInvalid :
def get_class_name(self) :
return "AG:IFI:invalid_class_name;"
def get_type(self) :
return "(AG:IFI:invalid_type)"
def get_descriptor(self) :
return "(AG:IFI:invalid_descriptor)"
def get_name(self) :
return "AG:IFI:invalid_name"
def get_list(self) :
return [ self.get_class_name(), self.get_type(), self.get_name() ]
def show(self) :
print "AG:IFI:invalid_field_item"
class MethodIdItemInvalid :
def get_class_name(self) :
return "AG:IMI:invalid_class_name;"
def get_descriptor(self) :
return "(AG:IMI:invalid_descriptor)"
def get_proto(self) :
return "()AG:IMI:invalid_proto"
def get_name(self) :
return "AG:IMI:invalid_name"
def get_list(self) :
return [ self.get_class_name(), self.get_name(), self.get_proto() ]
def show(self) :
print "AG:IMI:invalid_method_item"
class EncodedField:
    """
    This class can parse an encoded_field of a dex file

    :param buff: a string which represents a Buff object of the encoded field
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm):
        self.CM = cm
        self.offset = buff.get_idx()
        self.field_idx_diff = readuleb128( buff )  # uleb128 delta from the previous field's index
        self.access_flags = readuleb128( buff )    # uleb128 access flags
        self.field_idx = 0           # absolute index, filled in by adjust_idx()
        self.name = None             # resolved by reload()
        self.proto = None            # resolved by reload()
        self.class_name = None       # resolved by reload()
        self.init_value = None       # optionally attached via set_init_value()
        self.access_flags_string = None  # lazy cache for get_access_flags_string()
    def reload(self) :
        # CM.get_field() returns [class_name, type, name] (cf. FieldIdItem.get_list).
        name = self.CM.get_field( self.field_idx )
        self.class_name = name[0]
        self.name = name[2]
        self.proto = ''.join(i for i in name[1])
    def set_init_value(self, value) :
        """
        Setup the init value object of the field

        :param value: the init value
        :type value: :class:`EncodedValue`
        """
        self.init_value = value
    def get_init_value(self) :
        """
        Return the init value object of the field

        :rtype: :class:`EncodedValue`
        """
        return self.init_value
    def adjust_idx(self, val) :
        # Convert the stored delta into an absolute field index.
        self.field_idx = self.field_idx_diff + val
    def get_field_idx_diff(self) :
        """
        Return the index into the field_ids list for the identity of this field (includes the name and descriptor),
        represented as a difference from the index of previous element in the list

        :rtype: int
        """
        return self.field_idx_diff
    def get_field_idx(self) :
        """
        Return the real index of the method

        :rtype: int
        """
        return self.field_idx
    def get_access_flags(self) :
        """
        Return the access flags of the field

        :rtype: int
        """
        return self.access_flags
    def get_class_name(self) :
        """
        Return the class name of the field

        :rtype: string
        """
        return self.class_name
    def get_descriptor(self) :
        """
        Return the descriptor of the field

        :rtype: string
        """
        return self.proto
    def get_name(self) :
        """
        Return the name of the field

        :rtype: string
        """
        return self.name
    def get_access_flags_string(self) :
        """
        Return the access flags string of the field

        :rtype: string
        """
        # Computed once and cached; falls back to the hex value when no
        # symbolic name is known.
        if self.access_flags_string == None :
            self.access_flags_string = get_access_flags_string( self.get_access_flags() )

            if self.access_flags_string == "" :
                self.access_flags_string = "0x%x" % self.get_access_flags()
        return self.access_flags_string
    def set_name(self, value):
        # Rename through the ClassManager hook, then re-resolve cached names.
        self.CM.set_hook_field_name(self, value)
        self.reload()
    def get_obj(self) :
        return []
    def get_raw(self) :
        return writeuleb128( self.field_idx_diff ) + writeuleb128( self.access_flags )
    def get_size(self) :
        return len(self.get_raw())
    def show(self):
        """
        Display the information about the field
        """
        colors = bytecode.disable_print_colors()
        self.pretty_show()
        bytecode.enable_print_colors(colors)
    def pretty_show(self) :
        """
        Display the information (with a pretty print) about the field
        """
        bytecode._PrintSubBanner("Field Information")
        bytecode._PrintDefault("%s->%s %s [access_flags=%s]\n" % ( self.get_class_name(), self.get_name(), self.get_descriptor(), self.get_access_flags_string() ))

        init_value = self.get_init_value()
        if init_value != None :
            bytecode._PrintDefault( "\tinit value: %s\n" % str( init_value.get_value() ) )

        self.show_dref()
    def show_dref(self) :
        """
        Display where this field is read or written
        """
        # NOTE(review): DREFr/DREFw are not set anywhere in this class —
        # presumably attached by an external analysis pass; absent attributes
        # are silently tolerated below.
        try :
            bytecode._PrintSubBanner("DREF")
            bytecode._PrintDRef("R", self.DREFr.items)
            bytecode._PrintDRef("W", self.DREFw.items)
            bytecode._PrintSubBanner()
        except AttributeError:
            pass
class EncodedMethod:
"""
This class can parse an encoded_method of a dex file
:param buff: a string which represents a Buff object of the encoded_method
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
    def __init__(self, buff, cm) :
        self.CM = cm
        self.offset = buff.get_idx()
        self.method_idx_diff = readuleb128( buff )  #: method index diff in the corresponding section
        self.access_flags = readuleb128( buff )     #: access flags of the method
        self.code_off = readuleb128( buff )         #: offset of the code section
        self.method_idx = 0          # absolute index, filled in by adjust_idx()
        self.name = None             # resolved by reload()
        self.proto = None            # resolved by reload()
        self.class_name = None       # resolved by reload()
        self.code = None             # DalvikCode object, resolved by reload()
        self.access_flags_string = None  # lazy cache for get_access_flags_string()
        self.notes = []              # free-form analysis notes (see add_note elsewhere)
    def adjust_idx(self, val) :
        # Convert the stored delta into an absolute method index.
        self.method_idx = self.method_idx_diff + val
    def get_method_idx(self) :
        """
        Return the real index of the method

        :rtype: int
        """
        return self.method_idx
    def get_method_idx_diff(self) :
        """
        Return index into the method_ids list for the identity of this method (includes the name and descriptor),
        represented as a difference from the index of previous element in the lis

        :rtype: int
        """
        return self.method_idx_diff
    def get_access_flags(self) :
        """
        Return the access flags of the method

        :rtype: int
        """
        return self.access_flags
    def get_code_off(self) :
        """
        Return the offset from the start of the file to the code structure for this method,
        or 0 if this method is either abstract or native

        :rtype: int
        """
        return self.code_off
    def get_access_flags_string(self) :
        """
        Return the access flags string of the method

        :rtype: string
        """
        # Computed once and cached; falls back to the hex value when no
        # symbolic name is known.
        if self.access_flags_string == None :
            self.access_flags_string = get_access_flags_string( self.get_access_flags() )

            if self.access_flags_string == "" :
                self.access_flags_string = "0x%x" % self.get_access_flags()
        return self.access_flags_string
    def reload(self) :
        # CM.get_method() returns [class_name, name, proto-parts].
        v = self.CM.get_method( self.method_idx )
        self.class_name = v[0]
        self.name = v[1]
        self.proto = ''.join(i for i in v[2])

        self.code = self.CM.get_code( self.code_off )
    def get_locals(self):
        # Registers not used for parameters or the implicit `this`.
        # NOTE(review): assumes reload() ran and self.code is not None —
        # confirm callers guard against abstract/native methods.
        ret = self.proto.split(')')
        params = ret[0][1:].split()

        return self.code.get_registers_size() - len(params) - 1
def get_information(self):
info = {}
if self.code:
nb = self.code.get_registers_size()
proto = self.get_descriptor()
ret = proto.split(')')
params = ret[0][1:].split()
ret = proto.split(')')
params = ret[0][1:].split()
if params:
info["registers"] = (0, nb - len(params) - 1)
j = 0
info["params"] = []
for i in xrange(nb - len(params), nb):
info["params"].append((i, get_type(params[j])))
j += 1
else:
info["registers"] = (0, nb - 1)
info["return"] = get_type(ret[1])
return info
def each_params_by_register(self, nb, proto):
bytecode._PrintSubBanner("Params")
ret = proto.split(')')
params = ret[0][1:].split()
if params:
bytecode._PrintDefault("- local registers: v%d...v%d\n" % (0, nb - len(params) - 1))
j = 0
for i in xrange(nb - len(params), nb):
bytecode._PrintDefault("- v%d: %s\n" % (i, get_type(params[j])))
j += 1
else:
bytecode._PrintDefault("local registers: v%d...v%d\n" % (0, nb - 1))
bytecode._PrintDefault("- return: %s\n" % get_type(ret[1]))
bytecode._PrintSubBanner()
def show_info(self) :
"""
Display the basic information about the method
"""
bytecode._PrintSubBanner("Method Information")
bytecode._PrintDefault("%s->%s%s [access_flags=%s]\n" % ( self.get_class_name(), self.get_name(), self.get_descriptor(), self.get_access_flags_string() ))
def show(self):
"""
Display the information about the method
"""
colors = bytecode.disable_print_colors()
self.pretty_show()
bytecode.enable_print_colors(colors)
def pretty_show(self) :
"""
Display the information (with a pretty print) about the method
"""
self.show_info()
self.show_notes()
if self.code != None :
self.each_params_by_register( self.code.get_registers_size(), self.get_descriptor() )
if self.CM.get_vmanalysis() == None :
self.code.show()
else :
self.code.pretty_show( self.CM.get_vmanalysis().get_method( self ) )
self.show_xref()
def show_xref(self):
"""
Display where the method is called or which method is called
"""
try:
bytecode._PrintSubBanner("XREF")
bytecode._PrintXRef("F", self.XREFfrom.items)
bytecode._PrintXRef("T", self.XREFto.items)
bytecode._PrintSubBanner()
except AttributeError:
pass
def show_notes(self) :
"""
Display the notes about the method
"""
if self.notes != [] :
bytecode._PrintSubBanner("Notes")
for i in self.notes :
bytecode._PrintNote(i)
bytecode._PrintSubBanner()
def source(self):
"""
Return the source code of this method
:rtype: string
"""
self.CM.decompiler_ob.display_source(self)
def get_source(self):
return self.CM.decompiler_ob.get_source_method(self)
def get_length(self) :
"""
Return the length of the associated code of the method
:rtype: int
"""
if self.code != None :
return self.code.get_length()
return 0
def get_code(self) :
    """
    Return the code object associated to the method

    May be None for abstract or native methods.

    :rtype: :class:`DalvikCode` object
    """
    return self.code
def get_instructions(self) :
    """
    Get the instructions

    :rtype: a generator of each :class:`Instruction` (or a cached list of
            instructions if you have setup instructions); an empty list
            when the method has no code item
    """
    return [] if self.code == None else self.code.get_bc().get_instructions()
def set_instructions(self, instructions) :
    """
    Set the instructions

    :param instructions: the list of instructions
    :type instructions: a list of :class:`Instruction`

    Returns an empty list when the method has no code item (mirrors
    get_instructions), otherwise forwards to the bytecode container.
    """
    if self.code == None :
        return []
    return self.code.get_bc().set_instructions(instructions)
def get_instruction(self, idx, off=None) :
    """
    Get a particular instruction by using (default) the index of the
    address if specified

    :param idx: index of the instruction (the position in the list of the instruction)
    :type idx: int
    :param off: address of the instruction
    :type off: int

    :rtype: an :class:`Instruction` object, or None when the method has
            no code item
    """
    # BUGFIX: the guard previously tested self._code, an attribute that is
    # never assigned anywhere in this class (every other method uses
    # self.code), so any method WITH code raised AttributeError here.
    if self.code != None :
        return self.code.get_bc().get_instruction(idx, off)
    return None
def get_debug(self) :
    """
    Return the debug object associated to this method

    :rtype: :class:`DebugInfoItem`, or None when there is no code item
    """
    return None if self.code == None else self.code.get_debug()
def get_descriptor(self) :
    """
    Return the descriptor of the method

    NOTE(review): this returns the cached ``proto`` attribute; presumably a
    descriptor string built at reload time — verify against the class
    constructor, which is outside this view.

    :rtype: string
    """
    return self.proto
def get_class_name(self) :
    """
    Return the class name of the method

    :rtype: string
    """
    return self.class_name
def get_name(self) :
    """
    Return the name of the method

    :rtype: string
    """
    return self.name
def add_inote(self, msg, idx, off=None) :
    """
    Add a message to a specific instruction by using (default) the index
    of the address if specified

    :param msg: the message
    :type msg: string
    :param idx: index of the instruction (the position in the list of the instruction)
    :type idx: int
    :param off: address of the instruction
    :type off: int

    Silently ignored when the method has no code item.
    """
    if self.code != None :
        self.code.add_inote(msg, idx, off)
def add_note(self, msg) :
    """
    Add a message to this method

    :param msg: the message
    :type msg: string
    """
    self.notes.append( msg )
def set_code_idx(self, idx) :
    """
    Set the start address of the buffer to disassemble

    :param idx: the index
    :type idx: int

    No-op when the method has no code item.
    """
    if self.code != None :
        self.code.set_idx( idx )
def set_name(self, value) :
    """
    Rename the method through the ClassManager hook, then reload the
    cached name/descriptor strings.

    :param value: the new name
    :type value: string
    """
    self.CM.set_hook_method_name( self, value )
    self.reload()
def get_raw(self) :
    """
    Return the raw encoded_method item (three uleb128 values), refreshing
    code_off from the current code item offset first.

    :rtype: string
    """
    if self.code != None :
        self.code_off = self.code.get_off()
    return writeuleb128( self.method_idx_diff ) + writeuleb128( self.access_flags ) + writeuleb128( self.code_off )
def get_size(self) :
    """
    Return the size, in bytes, of the raw encoded_method item

    :rtype: int
    """
    return len(self.get_raw())
class ClassDataItem :
    """
    This class can parse a class_data_item of a dex file

    :param buff: a string which represents a Buff object of the class_data_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.__CM = cm

        # file offset of this item
        self.offset = buff.get_idx()

        # the four uleb128 header counts, in DEX field order
        self.static_fields_size = readuleb128( buff )
        self.instance_fields_size = readuleb128( buff )
        self.direct_methods_size = readuleb128( buff )
        self.virtual_methods_size = readuleb128( buff )

        self.static_fields = []
        self.instance_fields = []
        self.direct_methods = []
        self.virtual_methods = []

        # the four element lists follow the header in this exact order;
        # parsing order matters because the buffer index advances as we read
        self._load_elements( self.static_fields_size, self.static_fields, EncodedField, buff, cm )
        self._load_elements( self.instance_fields_size, self.instance_fields, EncodedField, buff, cm )
        self._load_elements( self.direct_methods_size, self.direct_methods, EncodedMethod, buff, cm )
        self._load_elements( self.virtual_methods_size, self.virtual_methods, EncodedMethod, buff, cm )

    def get_static_fields_size(self) :
        """
        Return the number of static fields defined in this item

        :rtype: int
        """
        return self.static_fields_size

    def get_instance_fields_size(self) :
        """
        Return the number of instance fields defined in this item

        :rtype: int
        """
        return self.instance_fields_size

    def get_direct_methods_size(self) :
        """
        Return the number of direct methods defined in this item

        :rtype: int
        """
        return self.direct_methods_size

    def get_virtual_methods_size(self) :
        """
        Return the number of virtual methods defined in this item

        :rtype: int
        """
        return self.virtual_methods_size

    def get_static_fields(self) :
        """
        Return the defined static fields, represented as a sequence of encoded elements

        :rtype: a list of :class:`EncodedField` objects
        """
        return self.static_fields

    def get_instance_fields(self) :
        """
        Return the defined instance fields, represented as a sequence of encoded elements

        :rtype: a list of :class:`EncodedField` objects
        """
        return self.instance_fields

    def get_direct_methods(self) :
        """
        Return the defined direct (any of static, private, or constructor) methods, represented as a sequence of encoded elements

        :rtype: a list of :class:`EncodedMethod` objects
        """
        return self.direct_methods

    def get_virtual_methods(self) :
        """
        Return the defined virtual (none of static, private, or constructor) methods, represented as a sequence of encoded elements

        :rtype: a list of :class:`EncodedMethod` objects
        """
        return self.virtual_methods

    def get_methods(self) :
        """
        Return direct and virtual methods

        :rtype: a list of :class:`EncodedMethod` objects
        """
        return [ x for x in self.direct_methods ] + [ x for x in self.virtual_methods ]

    def get_fields(self) :
        """
        Return static and instance fields

        :rtype: a list of :class:`EncodedField` objects
        """
        return [ x for x in self.static_fields ] + [ x for x in self.instance_fields ]

    def set_off(self, off) :
        # overwrite the recorded file offset (used when rebuilding the dex)
        self.offset = off

    def set_static_fields(self, value) :
        """
        Attach initial values (from an encoded_array) to the static fields.

        Values are matched positionally; nothing happens if there are more
        values than static fields.
        """
        if value != None :
            values = value.get_values()
            if len(values) <= len(self.static_fields) :
                for i in xrange(0, len(values)) :
                    self.static_fields[i].set_init_value( values[i] )

    def _load_elements(self, size, l, Type, buff, cm) :
        """
        Parse `size` encoded elements of class `Type` from the buffer into
        list `l`.  Field/method indices in the dex are delta-encoded, so each
        element is adjusted by the previous element's absolute index.
        """
        prev = 0
        for i in xrange(0, size) :
            el = Type(buff, cm)
            el.adjust_idx( prev )

            if isinstance(el, EncodedField) :
                prev = el.get_field_idx()
            else :
                prev = el.get_method_idx()

            l.append( el )

    def reload(self) :
        # re-resolve cached names/descriptors on every contained element
        for i in self.static_fields :
            i.reload()

        for i in self.instance_fields :
            i.reload()

        for i in self.direct_methods :
            i.reload()

        for i in self.virtual_methods :
            i.reload()

    def show(self) :
        self.pretty_show()

    def pretty_show(self) :
        bytecode._PrintSubBanner("Class Data Item")
        bytecode._PrintDefault("static_fields_size=%d instance_fields_size=%d direct_methods_size=%d virtual_methods_size=%d\n" % \
                (self.static_fields_size, self.instance_fields_size, self.direct_methods_size, self.virtual_methods_size))

        bytecode._PrintSubBanner("Static Fields")
        for i in self.static_fields :
            i.show()

        bytecode._PrintSubBanner("Instance Fields")
        for i in self.instance_fields :
            i.show()

        bytecode._PrintSubBanner("Direct Methods")
        for i in self.direct_methods :
            i.pretty_show()

        bytecode._PrintSubBanner("Virtual Methods")
        for i in self.virtual_methods :
            i.pretty_show()

    def get_obj(self) :
        return [ i for i in self.static_fields ] + \
                 [ i for i in self.instance_fields ] + \
                 [ i for i in self.direct_methods ] + \
                 [ i for i in self.virtual_methods ]

    def get_raw(self) :
        # serialize: the four uleb128 counts followed by all elements,
        # in the same order they were parsed
        buff = writeuleb128( self.static_fields_size ) + \
                 writeuleb128( self.instance_fields_size ) + \
                 writeuleb128( self.direct_methods_size ) + \
                 writeuleb128( self.virtual_methods_size ) + \
                 ''.join(i.get_raw() for i in self.static_fields) + \
                 ''.join(i.get_raw() for i in self.instance_fields) + \
                 ''.join(i.get_raw() for i in self.direct_methods) + \
                 ''.join(i.get_raw() for i in self.virtual_methods)
        return buff

    def get_length(self) :
        length = len(writeuleb128( self.static_fields_size )) +   \
                    len(writeuleb128( self.instance_fields_size )) + \
                    len(writeuleb128( self.direct_methods_size )) +  \
                    len(writeuleb128( self.virtual_methods_size ))

        for i in self.static_fields :
            length += i.get_size()

        for i in self.instance_fields :
            length += i.get_size()

        for i in self.direct_methods :
            length += i.get_size()

        for i in self.virtual_methods :
            length += i.get_size()

        return length

    def get_off(self) :
        return self.offset
class ClassDefItem:
    """
    This class can parse a class_def_item of a dex file

    :param buff: a string which represents a Buff object of the class_def_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm):
        self.__CM = cm

        self.offset = buff.get_idx()

        # eight little-endian uint32 fields, in class_def_item order
        self.class_idx = unpack("=I", buff.read(4))[0]
        self.access_flags = unpack("=I", buff.read(4))[0]
        self.superclass_idx = unpack("=I", buff.read(4))[0]
        self.interfaces_off = unpack("=I", buff.read(4))[0]
        self.source_file_idx = unpack("=I", buff.read(4))[0]
        self.annotations_off = unpack("=I", buff.read(4))[0]
        self.class_data_off = unpack("=I", buff.read(4))[0]
        self.static_values_off = unpack("=I", buff.read(4))[0]

        # lazily resolved in reload()
        self.interfaces = None
        self.class_data_item = None
        self.static_values = None

        # cached resolved strings (class name, superclass name, flags)
        self.name = None
        self.sname = None
        self.access_flags_string = None

    def reload(self) :
        """
        Resolve indices/offsets into actual names and sub-items via the
        ClassManager.  Must run after the whole dex has been indexed.
        """
        self.name = self.__CM.get_type( self.class_idx )
        self.sname = self.__CM.get_type( self.superclass_idx )

        if self.interfaces_off != 0 :
            self.interfaces = self.__CM.get_type_list( self.interfaces_off )

        if self.class_data_off != 0 :
            self.class_data_item = self.__CM.get_class_data_item( self.class_data_off )
            self.class_data_item.reload()

        if self.static_values_off != 0 :
            self.static_values = self.__CM.get_encoded_array_item ( self.static_values_off )
            # class_data_item must already be loaded (above) for the static
            # initial values to be attached to its fields
            if self.class_data_item != None :
                self.class_data_item.set_static_fields( self.static_values.get_value() )

    def get_methods(self) :
        """
        Return all methods of this class

        :rtype: a list of :class:`EncodedMethod` objects
        """
        if self.class_data_item != None :
            return self.class_data_item.get_methods()
        return []

    def get_fields(self) :
        """
        Return all fields of this class

        :rtype: a list of :class:`EncodedField` objects
        """
        if self.class_data_item != None :
            return self.class_data_item.get_fields()
        return []

    def get_class_idx(self) :
        """
        Return the index into the type_ids list for this class

        :rtype: int
        """
        return self.class_idx

    def get_access_flags(self) :
        """
        Return the access flags for the class (public, final, etc.)

        :rtype: int
        """
        return self.access_flags

    def get_superclass_idx(self) :
        """
        Return the index into the type_ids list for the superclass

        :rtype: int
        """
        return self.superclass_idx

    def get_interfaces_off(self) :
        """
        Return the offset from the start of the file to the list of interfaces, or 0 if there are none

        :rtype: int
        """
        return self.interfaces_off

    def get_source_file_idx(self) :
        """
        Return the index into the string_ids list for the name of the file containing the original
        source for (at least most of) this class, or the special value NO_INDEX to represent a lack of this information

        :rtype: int
        """
        return self.source_file_idx

    def get_annotations_off(self) :
        """
        Return the offset from the start of the file to the annotations structure for this class,
        or 0 if there are no annotations on this class.

        :rtype: int
        """
        return self.annotations_off

    def get_class_data_off(self) :
        """
        Return the offset from the start of the file to the associated class data for this item,
        or 0 if there is no class data for this class

        :rtype: int
        """
        return self.class_data_off

    def get_static_values_off(self) :
        """
        Return the offset from the start of the file to the list of initial values for static fields,
        or 0 if there are none (and all static fields are to be initialized with 0 or null)

        :rtype: int
        """
        return self.static_values_off

    def get_class_data(self) :
        """
        Return the associated class_data_item

        :rtype: a :class:`ClassDataItem` object
        """
        return self.class_data_item

    def get_name(self) :
        """
        Return the name of this class

        :rtype: string (None until reload() has run)
        """
        return self.name

    def get_superclassname(self) :
        """
        Return the name of the super class

        :rtype: string
        """
        return self.sname

    def get_interfaces(self) :
        """
        Return the name of the interface

        :rtype: string
        """
        return self.interfaces

    def get_access_flags_string(self) :
        """
        Return the access flags string of the class

        Falls back to the hexadecimal value when no flag name matches.

        :rtype: string
        """
        if self.access_flags_string == None :
            self.access_flags_string = get_access_flags_string( self.get_access_flags() )

            if self.access_flags_string == "" :
                self.access_flags_string = "0x%x" % self.get_access_flags()
        return self.access_flags_string

    def show(self):
        bytecode._PrintSubBanner("Class Def Item")
        bytecode._PrintDefault("name=%s, sname=%s, interfaces=%s, access_flags=%s\n" %
                               (self.name,
                                self.sname,
                                self.interfaces,
                                self.get_access_flags_string()))
        bytecode._PrintDefault("class_idx=%d, superclass_idx=%d, interfaces_off=%x, source_file_idx=%d, annotations_off=%x, class_data_off=%x, static_values_off=%x\n" %
                               (self.class_idx,
                                self.superclass_idx,
                                self.interfaces_off,
                                self.source_file_idx,
                                self.annotations_off,
                                self.class_data_off,
                                self.static_values_off))
        self.show_xref()

    def show_xref(self):
        """
        Display where the method is called or which method is called

        Unlike EncodedMethod.show_xref, only the "from" direction is
        printed here; XREFfrom only exists after an analysis pass.
        """
        try:
            bytecode._PrintSubBanner("XREF")
            bytecode._PrintXRef("F", self.XREFfrom.items)
            bytecode._PrintSubBanner()
        except AttributeError:
            pass

    def source(self):
        """
        Display the decompiled source code of the entire class

        NOTE(review): prints via the decompiler and returns None despite the
        historical ':rtype: string'; use get_source() for the text.
        """
        self.__CM.decompiler_ob.display_all(self)

    def get_source(self):
        # full decompiled source of the class, as a string
        return self.__CM.decompiler_ob.get_source_class(self)

    def set_name(self, value) :
        # rename the class through the ClassManager hook
        self.__CM.set_hook_class_name( self, value )

    def get_obj(self) :
        # refresh the four offsets from the (possibly relocated) sub-items
        # before serializing the eight uint32 fields
        if self.interfaces_off != 0 :
            self.interfaces_off = self.__CM.get_obj_by_offset( self.interfaces_off ).get_off()

        if self.annotations_off != 0 :
            self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()

        if self.class_data_off != 0 :
            self.class_data_off = self.__CM.get_obj_by_offset( self.class_data_off ).get_off()

        if self.static_values_off != 0 :
            self.static_values_off = self.__CM.get_obj_by_offset( self.static_values_off ).get_off()

        return pack("=I", self.class_idx) +          \
                 pack("=I", self.access_flags) +       \
                 pack("=I", self.superclass_idx) +     \
                 pack("=I", self.interfaces_off) +     \
                 pack("=I", self.source_file_idx) +    \
                 pack("=I", self.annotations_off) +    \
                 pack("=I", self.class_data_off) +     \
                 pack("=I", self.static_values_off)

    def get_raw(self) :
        return self.get_obj()

    def get_length(self) :
        return len(self.get_obj())
class ClassHDefItem :
    """
    This class can parse a list of class_def_item of a dex file

    :param buff: a string which represents a Buff object of the list of class_def_item
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, size, buff, cm) :
        self.__CM = cm

        self.offset = buff.get_idx()

        self.class_def = []

        for i in xrange(0, size) :
            idx = buff.get_idx()

            class_def = ClassDefItem( buff, cm )
            self.class_def.append( class_def )

            # each class_def_item is exactly eight uint32s; re-seek past it
            # regardless of how much the item's constructor consumed
            buff.set_idx( idx + calcsize("=IIIIIIII") )

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def get_class_idx(self, idx) :
        """
        Return the class_def_item whose type index is `idx`, or None.
        """
        for i in self.class_def :
            if i.get_class_idx() == idx :
                return i
        return None

    def get_method(self, name_class, name_method) :
        """
        Return every method named `name_method` in the class `name_class`.

        :rtype: a list of :class:`EncodedMethod` objects
        """
        l = []

        for i in self.class_def :
            if i.get_name() == name_class :
                for j in i.get_methods() :
                    if j.get_name() == name_method :
                        l.append(j)

        return l

    def get_names(self) :
        # names of every class in this list
        return [ x.get_name() for x in self.class_def ]

    def reload(self) :
        for i in self.class_def :
            i.reload()

    def show(self) :
        for i in self.class_def :
            i.show()

    def get_obj(self) :
        return [ i for i in self.class_def ]

    def get_raw(self) :
        return ''.join(i.get_raw() for i in self.class_def)

    def get_length(self) :
        length = 0
        for i in self.class_def :
            length += i.get_length()
        return length
class EncodedTypeAddrPair :
    """
    This class can parse an encoded_type_addr_pair of a dex file

    :param buff: a string which represents a Buff object of the encoded_type_addr_pair
    :type buff: Buff object
    """
    def __init__(self, buff) :
        # both fields are uleb128-encoded
        self.type_idx = readuleb128( buff )
        self.addr = readuleb128( buff )

    def get_type_idx(self) :
        """
        Return the index into the type_ids list for the type of the exception to catch

        :rtype: int
        """
        return self.type_idx

    def get_addr(self) :
        """
        Return the bytecode address of the associated exception handler

        :rtype: int
        """
        return self.addr

    def get_obj(self) :
        return []

    def show(self) :
        bytecode._PrintSubBanner("Encoded Type Addr Pair")
        bytecode._PrintDefault("type_idx=%d addr=%x\n" % (self.type_idx, self.addr))

    def get_raw(self) :
        return writeuleb128( self.type_idx ) + writeuleb128( self.addr )

    def get_length(self) :
        return len(self.get_raw())
class EncodedCatchHandler :
    """
    This class can parse an encoded_catch_handler of a dex file

    :param buff: a string which represents a Buff object of the encoded_catch_handler
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.offset = buff.get_idx()

        # sleb128: abs(size) typed handlers; size <= 0 means a trailing
        # catch-all address is also present (per the DEX format)
        self.size = readsleb128( buff )

        self.handlers = []

        for i in xrange(0, abs(self.size)) :
            self.handlers.append( EncodedTypeAddrPair(buff) )

        if self.size <= 0 :
            self.catch_all_addr = readuleb128( buff )

    def get_size(self) :
        """
        Return the number of catch types in this list

        :rtype: int (negative or zero when a catch-all handler follows)
        """
        return self.size

    def get_handlers(self) :
        """
        Return the stream of abs(size) encoded items, one for each caught type, in the order that the types should be tested.

        :rtype: a list of :class:`EncodedTypeAddrPair` objects
        """
        return self.handlers

    def get_catch_all_addr(self) :
        """
        Return the bytecode address of the catch-all handler. This element is only present if size is non-positive.

        :rtype: int
        """
        return self.catch_all_addr

    def get_off(self) :
        return self.offset

    def set_off(self, off) :
        self.offset = off

    def show(self) :
        bytecode._PrintSubBanner("Encoded Catch Handler")
        bytecode._PrintDefault("size=%d\n" % self.size)

        for i in self.handlers :
            i.show()

        if self.size <= 0 :
            bytecode._PrintDefault("catch_all_addr=%x\n" % self.catch_all_addr)

    def get_raw(self) :
        buff = writesleb128( self.size ) + ''.join(i.get_raw() for i in self.handlers)

        if self.size <= 0 :
            buff += writeuleb128( self.catch_all_addr )

        return buff

    def get_length(self) :
        length = len(writesleb128( self.size ))

        for i in self.handlers :
            length += i.get_length()

        if self.size <= 0 :
            length += len(writeuleb128( self.catch_all_addr ))

        return length
class EncodedCatchHandlerList :
    """
    This class can parse an encoded_catch_handler_list of a dex file

    :param buff: a string which represents a Buff object of the encoded_catch_handler_list
    :type buff: Buff object
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    """
    def __init__(self, buff, cm) :
        self.offset = buff.get_idx()

        self.size = readuleb128( buff )
        self.list = []

        for i in xrange(0, self.size) :
            self.list.append( EncodedCatchHandler(buff, cm) )

    def get_size(self) :
        """
        Return the size of this list, in entries

        :rtype: int
        """
        return self.size

    def get_list(self) :
        """
        Return the actual list of handler lists, represented directly (not as offsets), and concatenated sequentially

        :rtype: a list of :class:`EncodedCatchHandler` objects
        """
        return self.list

    def show(self) :
        bytecode._PrintSubBanner("Encoded Catch Handler List")
        bytecode._PrintDefault("size=%d\n" % self.size)

        for i in self.list :
            i.show()

    def get_off(self) :
        return self.offset

    def set_off(self, off) :
        self.offset = off

    def get_obj(self) :
        return writeuleb128( self.size )

    def get_raw(self) :
        return self.get_obj() + ''.join(i.get_raw() for i in self.list)

    def get_length(self) :
        length = len(self.get_obj())

        for i in self.list :
            length += i.get_length()
        return length
# 'kind' values for instruction reference arguments (see get_kind() below):
# what the instruction's index operand refers to.
KIND_METH = 0
KIND_STRING = 1
KIND_FIELD = 2
KIND_TYPE = 3
VARIES = 4
# odex/optimized-dex specific kinds
INLINE_METHOD = 5
VTABLE_OFFSET = 6
FIELD_OFFSET = 7
KIND_RAW_STRING = 8

# Operand categories returned by Instruction.get_operands().
OPERAND_REGISTER = 0
OPERAND_LITERAL = 1
OPERAND_RAW = 2
OPERAND_OFFSET = 3
# flag OR'ed with a KIND_* value to mark a kind-typed operand
OPERAND_KIND = 0x100
def get_kind(cm, kind, value):
    """
    Return the value of the 'kind' argument

    Resolves an instruction's index operand (method/field/string/type
    index, vtable or field offset, inline method number) to a printable
    string via the ClassManager.  Returns None for unknown kinds.

    :param cm: a ClassManager object
    :type cm: :class:`ClassManager`
    :param kind: the type of the 'kind' argument
    :type kind: int
    :param value: the value of the 'kind' argument
    :type value: int

    :rtype: string
    """
    if kind == KIND_METH:
        method = cm.get_method_ref(value)
        class_name = method.get_class_name()
        name = method.get_name()
        descriptor = method.get_descriptor()

        return "%s->%s%s" % (class_name, name, descriptor)

    elif kind == KIND_STRING:
        # repr() so control characters in the string stay printable
        return repr(cm.get_string(value))

    elif kind == KIND_RAW_STRING:
        return cm.get_string(value)

    elif kind == KIND_FIELD:
        class_name, proto, field_name = cm.get_field(value)
        return "%s->%s %s" % (class_name, field_name, proto)

    elif kind == KIND_TYPE:
        return cm.get_type(value)

    elif kind == VTABLE_OFFSET:
        return "vtable[0x%x]" % value

    elif kind == FIELD_OFFSET:
        return "field[0x%x]" % value

    elif kind == INLINE_METHOD:
        buff = "inline[0x%x]" % value

        # FIXME: depends of the android version ...
        if len(INLINE_METHODS) > value:
            elem = INLINE_METHODS[value]
            buff += " %s->%s%s" % (elem[0], elem[1], elem[2])

        return buff

    return None
class Instruction(object):
"""
This class represents a dalvik instruction
"""
def get_kind(self):
"""
Return the 'kind' argument of the instruction
:rtype: int
"""
if self.OP > 0xff:
if self.OP >= 0xf2ff:
return DALVIK_OPCODES_OPTIMIZED[self.OP][1][1]
return DALVIK_OPCODES_EXTENDED_WIDTH[self.OP][1][1]
return DALVIK_OPCODES_FORMAT[self.OP][1][1]
def get_name(self):
"""
Return the name of the instruction
:rtype: string
"""
if self.OP > 0xff:
if self.OP >= 0xf2ff:
return DALVIK_OPCODES_OPTIMIZED[self.OP][1][0]
return DALVIK_OPCODES_EXTENDED_WIDTH[self.OP][1][0]
return DALVIK_OPCODES_FORMAT[self.OP][1][0]
def get_op_value(self):
"""
Return the value of the opcode
:rtype: int
"""
return self.OP
def get_literals(self):
"""
Return the associated literals
:rtype: list of int
"""
return []
def show(self, idx):
"""
Print the instruction
"""
print self.get_name() + " " + self.get_output(idx),
def show_buff(self, idx):
"""
Return the display of the instruction
:rtype: string
"""
return self.get_output(idx)
def get_translated_kind(self):
"""
Return the translated value of the 'kind' argument
:rtype: string
"""
return get_kind(self.cm, self.get_kind(), self.get_ref_kind())
def get_output(self, idx=-1):
"""
Return an additional output of the instruction
:rtype: string
"""
raise("not implemented")
def get_operands(self, idx=-1):
"""
Return all operands
:rtype: list
"""
raise("not implemented")
def get_length(self):
"""
Return the length of the instruction
:rtype: int
"""
raise("not implemented")
def get_raw(self):
"""
Return the object in a raw format
:rtype: string
"""
raise("not implemented")
def get_ref_kind(self):
"""
Return the value of the 'kind' argument
:rtype: value
"""
raise("not implemented")
def get_formatted_operands(self):
return None
class InstructionInvalid(Instruction):
    """
    This class represents an invalid instruction

    Only the low byte (the opcode) of the first code unit is kept.
    """
    def __init__(self, cm, buff):
        super(InstructionInvalid, self).__init__()
        self.OP = unpack("=H", buff[0:2])[0] & 0xff

    def get_name(self):
        """
        Return the name of the instruction

        :rtype: string
        """
        return "AG:invalid_instruction"

    def get_output(self, idx=-1):
        # show the raw opcode value since the instruction is not decodable
        return "(OP:%x)" % self.OP

    def get_operands(self, idx=-1):
        return []

    def get_length(self):
        # one 16-bit code unit
        return 2

    def get_raw(self):
        return pack("=H", self.OP)
class FillArrayData:
    """
    This class can parse a FillArrayData instruction

    :param buff: a Buff object which represents a buffer where the instruction is stored
    """
    def __init__(self, buff):
        self.notes = []

        # header: ident (ushort), element_width (ushort), size (uint)
        self.format_general_size = calcsize("=HHI")
        self.ident = unpack("=H", buff[0:2])[0]
        self.element_width = unpack("=H", buff[2:4])[0]
        self.size = unpack("=I", buff[4:8])[0]

        # NOTE(review): the `+ 1` makes this slice one byte longer than
        # size * element_width — looks like an off-by-one, but get_raw()
        # round-trips self.data as-is, so it is kept. TODO confirm.
        self.data = buff[self.format_general_size:self.format_general_size + (self.size * self.element_width) + 1]

    def add_note(self, msg):
        """
        Add a note to this instruction

        :param msg: the message
        :type msg: objects (string)
        """
        self.notes.append(msg)

    def get_notes(self):
        """
        Get all notes from this instruction

        :rtype: a list of objects
        """
        return self.notes

    def get_op_value(self):
        """
        Get the value of the opcode

        :rtype: int
        """
        return self.ident

    def get_data(self):
        """
        Return the data of this instruction (the payload)

        :rtype: string
        """
        return self.data

    def get_output(self, idx=-1):
        """
        Return an additional output of the instruction

        :rtype: string
        """
        buff = ""

        data = self.get_data()

        buff += repr(data) + " | "
        for i in xrange(0, len(data)):
            buff += "\\x%02x" % ord(data[i])

        return buff

    def get_operands(self, idx=-1):
        return [(OPERAND_RAW, repr(self.get_data()))]

    def get_formatted_operands(self):
        return None

    def get_name(self):
        """
        Return the name of the instruction

        :rtype: string
        """
        return "fill-array-data-payload"

    def show_buff(self, pos):
        """
        Return the display of the instruction

        :rtype: string
        """
        buff = self.get_name() + " "

        for i in xrange(0, len(self.data)):
            buff += "\\x%02x" % ord(self.data[i])
        return buff

    def show(self, pos):
        """
        Print the instruction
        """
        print self.show_buff(pos),

    def get_length(self):
        """
        Return the length of the instruction

        :rtype: int (bytes; matches the dex formula (size*width+1)/2+4
                code units, converted to bytes)
        """
        return ((self.size * self.element_width + 1) / 2 + 4) * 2

    def get_raw(self):
        return pack("=H", self.ident) + pack("=H", self.element_width) + pack("=I", self.size) + self.data
class SparseSwitch:
    """
    This class can parse a SparseSwitch instruction

    :param buff: a Buff object which represents a buffer where the instruction is stored
    """
    def __init__(self, buff):
        self.notes = []

        # header: ident (ushort), size (ushort), then `size` int32 keys
        # followed by `size` int32 branch targets
        self.format_general_size = calcsize("=HH")
        self.ident = unpack("=H", buff[0:2])[0]
        self.size = unpack("=H", buff[2:4])[0]

        self.keys = []
        self.targets = []

        idx = self.format_general_size
        for i in xrange(0, self.size):
            self.keys.append(unpack('=l', buff[idx:idx + 4])[0])
            idx += 4

        for i in xrange(0, self.size):
            self.targets.append(unpack('=l', buff[idx:idx + 4])[0])
            idx += 4

    def add_note(self, msg):
        """
        Add a note to this instruction

        :param msg: the message
        :type msg: objects (string)
        """
        self.notes.append(msg)

    def get_notes(self):
        """
        Get all notes from this instruction

        :rtype: a list of objects
        """
        return self.notes

    def get_op_value(self):
        """
        Get the value of the opcode

        :rtype: int
        """
        return self.ident

    def get_keys(self):
        """
        Return the keys of the instruction

        :rtype: a list of long
        """
        return self.keys

    def get_values(self):
        return self.get_keys()

    def get_targets(self):
        """
        Return the targets (address) of the instruction

        :rtype: a list of long
        """
        return self.targets

    def get_output(self, idx=-1):
        """
        Return an additional output of the instruction

        :rtype: string
        """
        return " ".join("%x" % i for i in self.keys)

    def get_operands(self, idx=-1):
        """
        Return an additional output of the instruction

        :rtype: string
        """
        return []

    def get_formatted_operands(self):
        return None

    def get_name(self):
        """
        Return the name of the instruction

        :rtype: string
        """
        return "sparse-switch-payload"

    def show_buff(self, pos):
        """
        Return the display of the instruction

        :rtype: string
        """
        buff = self.get_name() + " "
        for i in xrange(0, len(self.keys)):
            buff += "%x:%x " % (self.keys[i], self.targets[i])

        return buff

    def show(self, pos):
        """
        Print the instruction
        """
        print self.show_buff(pos),

    def get_length(self):
        # header + (size keys + size targets) * 4 bytes
        return self.format_general_size + (self.size * calcsize('<L')) * 2

    def get_raw(self):
        return pack("=H", self.ident) + pack("=H", self.size) + ''.join(pack("=l", i) for i in self.keys) + ''.join(pack("=l", i) for i in self.targets)
class PackedSwitch:
    """
    This class can parse a PackedSwitch instruction

    :param buff: a Buff object which represents a buffer where the instruction is stored
    """
    def __init__(self, buff):
        self.notes = []

        # header: ident (ushort), size (ushort), first_key (int32),
        # then `size` int32 branch targets
        self.format_general_size = calcsize("=HHI")
        self.ident = unpack("=H", buff[0:2])[0]
        self.size = unpack("=H", buff[2:4])[0]
        self.first_key = unpack("=i", buff[4:8])[0]

        self.targets = []

        idx = self.format_general_size

        # defensive clamp: never read past the end of a truncated buffer
        max_size = self.size
        if (max_size * 4) > len(buff):
            max_size = len(buff) - idx - 8

        for i in xrange(0, max_size):
            self.targets.append(unpack('=l', buff[idx:idx + 4])[0])
            idx += 4

    def add_note(self, msg):
        """
        Add a note to this instruction

        :param msg: the message
        :type msg: objects (string)
        """
        self.notes.append(msg)

    def get_notes(self):
        """
        Get all notes from this instruction

        :rtype: a list of objects
        """
        return self.notes

    def get_op_value(self):
        """
        Get the value of the opcode

        :rtype: int
        """
        return self.ident

    def get_keys(self):
        """
        Return the keys of the instruction

        Keys are consecutive integers starting at first_key.

        :rtype: a list of long
        """
        return [(self.first_key + i) for i in range(0, len(self.targets))]

    def get_values(self):
        return self.get_keys()

    def get_targets(self):
        """
        Return the targets (address) of the instruction

        :rtype: a list of long
        """
        return self.targets

    def get_output(self, idx=-1):
        """
        Return an additional output of the instruction

        :rtype: string
        """
        return " ".join("%x" % (self.first_key + i) for i in range(0, len(self.targets)))

    def get_operands(self, idx=-1):
        """
        Return an additional output of the instruction

        :rtype: string
        """
        return []

    def get_formatted_operands(self):
        return None

    def get_name(self):
        """
        Return the name of the instruction

        :rtype: string
        """
        return "packed-switch-payload"

    def show_buff(self, pos):
        """
        Return the display of the instruction

        :rtype: string
        """
        buff = self.get_name() + " "
        buff += "%x:" % self.first_key

        for i in self.targets:
            buff += " %x" % i

        return buff

    def show(self, pos):
        """
        Print the instruction
        """
        print self.show_buff(pos),

    def get_length(self):
        # header + size * 4 bytes of targets (declared size, not clamped)
        return self.format_general_size + (self.size * calcsize('=L'))

    def get_raw(self):
        return pack("=H", self.ident) + pack("=H", self.size) + pack("=i", self.first_key) + ''.join(pack("=l", i) for i in self.targets)
class Instruction35c(Instruction):
    """
    This class represents all instructions which have the 35c format

    Layout: op AA, then a 16-bit index BBBB, then up to five 4-bit
    register arguments C..G (A gives the count, G is stored in the
    first code unit).
    """
    def __init__(self, cm, buff):
        super(Instruction35c, self).__init__()
        self.cm = cm

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.G = (i16 >> 8) & 0xf
        self.A = (i16 >> 12) & 0xf
        self.BBBB = unpack("=H", buff[2:4])[0]

        i16 = unpack("=H", buff[4:6])[0]
        self.C = i16 & 0xf
        self.D = (i16 >> 4) & 0xf
        self.E = (i16 >> 8) & 0xf
        self.F = (i16 >> 12) & 0xf

    def _registers(self):
        # the first A registers, in argument order
        return [self.C, self.D, self.E, self.F, self.G][:self.A]

    def get_output(self, idx=-1):
        # A > 5 is malformed; mirror the historical behavior (empty output)
        if self.A > 5:
            return ""
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)
        parts = ["v%d" % reg for reg in self._registers()]
        parts.append("%s" % kind)
        return ", ".join(parts)

    def get_operands(self, idx=-1):
        if self.A > 5:
            return []
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)
        ops = [(OPERAND_REGISTER, reg) for reg in self._registers()]
        ops.append((self.get_kind() + OPERAND_KIND, self.BBBB, kind))
        return ops

    def get_length(self):
        # three 16-bit code units
        return 6

    def get_ref_kind(self):
        return self.BBBB

    def get_raw(self):
        return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
class Instruction10x(Instruction):
    """
    This class represents all instructions which have the 10x format

    A single 16-bit code unit whose low byte is the opcode; no operands.
    """
    def __init__(self, cm, buff):
        super(Instruction10x, self).__init__()
        self.OP = unpack("=H", buff[0:2])[0] & 0xff

    def get_output(self, idx=-1):
        # no operands to display
        return ""

    def get_operands(self, idx=-1):
        return []

    def get_length(self):
        return 2

    def get_raw(self):
        return pack("=H", self.OP)
class Instruction21h(Instruction):
    """
    This class represents all instructions which have the 21h format

    Layout: op AA, BBBB — BBBB is a signed 16-bit literal holding the
    HIGH bits of a 32/64-bit constant (const/high16, const-wide/high16).
    """
    def __init__(self, cm, buff):
        super(Instruction21h, self).__init__()
        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        self.BBBB = unpack("=h", buff[2:4])[0]

        #log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))

        self.formatted_operands = []

        # decode the literal as float/double by zero-filling the low bytes
        # (Python 2 only: concatenates str with pack() output)
        if self.OP == 0x15:
            self.formatted_operands.append(unpack('=f', '\x00\x00' + pack('=h', self.BBBB))[0])

        elif self.OP == 0x19:
            self.formatted_operands.append(unpack('=d', '\x00\x00\x00\x00\x00\x00' + pack('=h', self.BBBB))[0])

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        buff = ""
        buff += "v%d, %d" % (self.AA, self.BBBB)

        if self.formatted_operands != []:
            buff += " # %s" % (str(self.formatted_operands))

        return buff

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBB)]

    def get_formatted_operands(self):
        return self.formatted_operands

    def get_literals(self):
        return [self.BBBB]

    def get_raw(self):
        return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction11n(Instruction):
    """
    This class represents all instructions which have the 11n format

    Encoding: B|A|op, where B is a signed 4-bit literal (const/4).
    """
    def __init__(self, cm, buff):
        super(Instruction11n, self).__init__()

        # The word is read *signed* so that the unmasked shift below
        # sign-extends the B nibble.
        i16 = unpack("=h", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.A = (i16 >> 8) & 0xf
        # Deliberately not masked with 0xf: B keeps its sign, matching the
        # signed 4-bit literal semantics of const/4.
        self.B = (i16 >> 12)

        #log_andro.debug("OP:%x %s A:%x B:%x" % (self.OP, args[0], self.A, self.B))

    def get_output(self, idx=-1):
        buff = ""
        buff += "v%d, %d" % (self.A, self.B)
        return buff

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.A), (OPERAND_LITERAL, self.B)]

    def get_literals(self):
        return [self.B]

    def get_length(self):
        return 2

    def get_raw(self):
        # A possibly-negative B still encodes correctly: Python's signed
        # shift/OR arithmetic yields the right 16-bit pattern for "=h".
        return pack("=h", (self.B << 12) | (self.A << 8) | self.OP)
class Instruction21c(Instruction):
    """
    This class represents all instructions which have the 21c format

    Encoding: AA|op BBBB, where BBBB indexes a constant-pool entry
    (string, type, field, ...).
    """
    def __init__(self, cm, buff):
        super(Instruction21c, self).__init__()
        self.cm = cm

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.AA = (word >> 8) & 0xff
        self.BBBB = unpack("=H", buff[2:4])[0]

    def get_length(self):
        return 4

    def _resolved_kind(self):
        # Resolve the BBBB index against the constant pool.
        return get_kind(self.cm, self.get_kind(), self.BBBB)

    def get_output(self, idx=-1):
        return "v%d, %s" % (self.AA, self._resolved_kind())

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA),
                (self.get_kind() + OPERAND_KIND, self.BBBB, self._resolved_kind())]

    def get_ref_kind(self):
        return self.BBBB

    def get_string(self):
        return self._resolved_kind()

    def get_raw_string(self):
        return get_kind(self.cm, KIND_RAW_STRING, self.BBBB)

    def get_raw(self):
        return pack("=HH", (self.AA << 8) | self.OP, self.BBBB)
class Instruction21s(Instruction):
    """
    This class represents all instructions which have the 21s format

    Encoding: AA|op BBBB, with BBBB a signed 16-bit literal
    (const/16, const-wide/16).
    """
    def __init__(self, cm, buff):
        super(Instruction21s, self).__init__()

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        # Signed 16-bit literal.
        self.BBBB = unpack("=h", buff[2:4])[0]

        self.formatted_operands = []

        if self.OP == 0x16:
            # const-wide/16: pack('=d', BBBB) converts the integer to a
            # double by *value* (so this is just float(BBBB)), unlike the
            # bit-level reinterpretation used for high16 opcodes elsewhere.
            # NOTE(review): presumably intentional display aid - confirm.
            self.formatted_operands.append(unpack('=d', pack('=d', self.BBBB))[0])

        #log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        buff = ""
        buff += "v%d, %d" % (self.AA, self.BBBB)

        if self.formatted_operands != []:
            buff += " # %s" % str(self.formatted_operands)
        return buff

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBB)]

    def get_literals(self):
        return [self.BBBB]

    def get_formatted_operands(self):
        return self.formatted_operands

    def get_raw(self):
        return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction22c(Instruction):
    """
    This class represents all instructions which have the 22c format

    Encoding: B|A|op CCCC - two 4-bit registers plus a 16-bit
    constant-pool index (instance-of, new-array, ...).
    """
    def __init__(self, cm, buff):
        super(Instruction22c, self).__init__()
        self.cm = cm

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.A = (word >> 8) & 0xf
        self.B = (word >> 12) & 0xf
        self.CCCC = unpack("=H", buff[2:4])[0]

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        resolved = get_kind(self.cm, self.get_kind(), self.CCCC)
        return "v%d, v%d, %s" % (self.A, self.B, resolved)

    def get_operands(self, idx=-1):
        resolved = get_kind(self.cm, self.get_kind(), self.CCCC)
        return [(OPERAND_REGISTER, self.A),
                (OPERAND_REGISTER, self.B),
                (self.get_kind() + OPERAND_KIND, self.CCCC, resolved)]

    def get_ref_kind(self):
        return self.CCCC

    def get_raw(self):
        return pack("=HH", (self.B << 12) | (self.A << 8) | (self.OP), self.CCCC)
class Instruction22cs(Instruction):
    """
    This class represents all instructions which have the 22cs format

    Same layout as 22c (B|A|op CCCC) but used by the odex "quick"
    variants; CCCC indexes a kind resolved via the constant manager.
    """
    def __init__(self, cm, buff):
        super(Instruction22cs, self).__init__()
        self.cm = cm

        first_word = unpack("=H", buff[0:2])[0]
        self.OP = first_word & 0xff
        self.A = (first_word >> 8) & 0xf
        self.B = (first_word >> 12) & 0xf
        self.CCCC = unpack("=H", buff[2:4])[0]

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        kind = get_kind(self.cm, self.get_kind(), self.CCCC)
        return "v%d, v%d, %s" % (self.A, self.B, kind)

    def get_operands(self, idx=-1):
        kind = get_kind(self.cm, self.get_kind(), self.CCCC)
        operands = [(OPERAND_REGISTER, self.A), (OPERAND_REGISTER, self.B)]
        operands.append((self.get_kind() + OPERAND_KIND, self.CCCC, kind))
        return operands

    def get_ref_kind(self):
        return self.CCCC

    def get_raw(self):
        return pack("=HH", (self.B << 12) | (self.A << 8) | (self.OP), self.CCCC)
class Instruction31t(Instruction):
    """
    This class represents all instructions which have the 31t format

    Encoding: AA|op BBBBBBBB, with a signed 32-bit offset in code units
    (fill-array-data, packed/sparse-switch payload offset).
    """
    def __init__(self, cm, buff):
        super(Instruction31t, self).__init__()

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        # Signed 32-bit offset, in 16-bit code units.
        self.BBBBBBBB = unpack("=i", buff[2:6])[0]

        #log_andro.debug("OP:%x %s AA:%x BBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBB))

    def get_length(self):
        return 6

    def get_output(self, idx=-1):
        buff = ""
        # Offset doubled to convert code units to bytes, then rebased on
        # the caller-supplied instruction address idx.
        buff += "v%d, +%x (0x%x)" % (self.AA, self.BBBBBBBB, self.BBBBBBBB * 2 + idx)
        return buff

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBBBBBB)]

    def get_ref_off(self):
        return self.BBBBBBBB

    def get_raw(self):
        return pack("=Hi", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction31c(Instruction):
    """
    This class represents all instructions which have the 31c format

    Encoding: AA|op BBBBBBBB, with a 32-bit constant-pool index
    (const-string/jumbo).
    """
    def __init__(self, cm, buff):
        super(Instruction31c, self).__init__()
        self.cm = cm

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.AA = (word >> 8) & 0xff
        self.BBBBBBBB = unpack("=I", buff[2:6])[0]

    def get_length(self):
        return 6

    def get_output(self, idx=-1):
        return "v%d, %s" % (self.AA, get_kind(self.cm, self.get_kind(), self.BBBBBBBB))

    def get_operands(self, idx=-1):
        resolved = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
        return [(OPERAND_REGISTER, self.AA),
                (self.get_kind() + OPERAND_KIND, self.BBBBBBBB, resolved)]

    def get_ref_kind(self):
        return self.BBBBBBBB

    def get_string(self):
        """
        Return the string associated to the 'kind' argument

        :rtype: string
        """
        return get_kind(self.cm, self.get_kind(), self.BBBBBBBB)

    def get_raw_string(self):
        return get_kind(self.cm, KIND_RAW_STRING, self.BBBBBBBB)

    def get_raw(self):
        return pack("=HI", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction12x(Instruction):
    """
    This class represents all instructions which have the 12x format

    Encoding: B|A|op - two 4-bit register operands in one word.
    """
    def __init__(self, cm, buff):
        super(Instruction12x, self).__init__()

        word = unpack("=h", buff[0:2])[0]
        self.OP = word & 0xff
        self.A = (word >> 8) & 0xf
        self.B = (word >> 12) & 0xf

    def get_length(self):
        return 2

    def get_output(self, idx=-1):
        return "v%d, v%d" % (self.A, self.B)

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.A), (OPERAND_REGISTER, self.B)]

    def get_raw(self):
        return pack("=H", (self.B << 12) | (self.A << 8) | (self.OP))
class Instruction11x(Instruction):
    """
    This class represents all instructions which have the 11x format

    Encoding: AA|op - a single 8-bit register operand
    (return, move-result, monitor-enter, ...).
    """
    def __init__(self, cm, buff):
        super(Instruction11x, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.AA = (word >> 8) & 0xff

    def get_length(self):
        return 2

    def get_output(self, idx=-1):
        return "v%d" % (self.AA)

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA)]

    def get_raw(self):
        return pack("=H", (self.AA << 8) | self.OP)
class Instruction51l(Instruction):
    """
    This class represents all instructions which have the 51l format

    Encoding: AA|op BBBBBBBBBBBBBBBB - a signed 64-bit literal
    (const-wide).
    """
    def __init__(self, cm, buff):
        super(Instruction51l, self).__init__()

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        # Signed 64-bit literal.
        self.BBBBBBBBBBBBBBBB = unpack("=q", buff[2:10])[0]

        self.formatted_operands = []

        if self.OP == 0x18:
            # const-wide: reinterpret the raw 64-bit pattern as an IEEE-754
            # double for display (bit-level, not a value conversion).
            self.formatted_operands.append(unpack('=d', pack('=q', self.BBBBBBBBBBBBBBBB))[0])

        #log_andro.debug("OP:%x %s AA:%x BBBBBBBBBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBBBBBBBBBB))

    def get_length(self):
        return 10

    def get_output(self, idx=-1):
        buff = ""

        buff += "v%d, %d" % (self.AA, self.BBBBBBBBBBBBBBBB)

        if self.formatted_operands:
            buff += " # %s" % str(self.formatted_operands)
        return buff

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBBBBBBBBBBBBBB)]

    def get_formatted_operands(self):
        return self.formatted_operands

    def get_literals(self):
        return [self.BBBBBBBBBBBBBBBB]

    def get_raw(self):
        return pack("=Hq", (self.AA << 8) | self.OP, self.BBBBBBBBBBBBBBBB)
class Instruction31i(Instruction):
    """
    This class represents all instructions which have the 31i format

    Encoding: AA|op BBBBBBBB - a signed 32-bit literal
    (const, const-wide/32).
    """
    def __init__(self, cm, buff):
        super(Instruction31i, self).__init__()

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        # Signed 32-bit literal.
        self.BBBBBBBB = unpack("=i", buff[2:6])[0]

        self.formatted_operands = []

        if self.OP == 0x14:
            # const: reinterpret the raw 32-bit pattern as a float
            # (bit-level reinterpretation via =i -> =f).
            self.formatted_operands.append(unpack("=f", pack("=i", self.BBBBBBBB))[0])
        elif self.OP == 0x17:
            # const-wide/32: pack('=d', ...) converts by *value*, so this
            # is effectively float(BBBBBBBB) - unlike the 0x14 branch.
            # NOTE(review): asymmetry looks deliberate but is unverified.
            self.formatted_operands.append(unpack('=d', pack('=d', self.BBBBBBBB))[0])

        #log_andro.debug("OP:%x %s AA:%x BBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBB))

    def get_length(self):
        return 6

    def get_output(self, idx=-1):
        buff = ""
        buff += "v%d, %d" % (self.AA, self.BBBBBBBB)

        if self.formatted_operands:
            buff += " # %s" % str(self.formatted_operands)
        return buff

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBBBBBB)]

    def get_formatted_operands(self):
        return self.formatted_operands

    def get_literals(self):
        return [self.BBBBBBBB]

    def get_raw(self):
        return pack("=Hi", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction22x(Instruction):
    """
    This class represents all instructions which have the 22x format

    Encoding: AA|op BBBB - an 8-bit and a 16-bit register operand
    (move/from16 family).
    """
    def __init__(self, cm, buff):
        super(Instruction22x, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.AA = (word >> 8) & 0xff
        self.BBBB = unpack("=H", buff[2:4])[0]

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        return "v%d, v%d" % (self.AA, self.BBBB)

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA), (OPERAND_REGISTER, self.BBBB)]

    def get_raw(self):
        return pack("=HH", (self.AA << 8) | self.OP, self.BBBB)
class Instruction23x(Instruction):
    """
    This class represents all instructions which have the 23x format

    Encoding: AA|op CC|BB - three 8-bit register operands
    (binary arithmetic ops, aget/aput, cmp, ...).
    """
    def __init__(self, cm, buff):
        super(Instruction23x, self).__init__()

        first = unpack("=H", buff[0:2])[0]
        self.OP = first & 0xff
        self.AA = (first >> 8) & 0xff

        second = unpack("=H", buff[2:4])[0]
        self.BB = second & 0xff
        self.CC = (second >> 8) & 0xff

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        return "v%d, v%d, v%d" % (self.AA, self.BB, self.CC)

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA),
                (OPERAND_REGISTER, self.BB),
                (OPERAND_REGISTER, self.CC)]

    def get_raw(self):
        return pack("=HH", (self.AA << 8) | self.OP, (self.CC << 8) | self.BB)
class Instruction20t(Instruction):
    """
    This class represents all instructions which have the 20t format

    Encoding: 00|op AAAA - a signed 16-bit branch offset (goto/16).
    """
    def __init__(self, cm, buff):
        super(Instruction20t, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        # Signed 16-bit branch offset in code units.
        self.AAAA = unpack("=h", buff[2:4])[0]

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        return "%+x" % (self.AAAA)

    def get_operands(self, idx=-1):
        return [(OPERAND_OFFSET, self.AAAA)]

    def get_ref_off(self):
        return self.AAAA

    def get_raw(self):
        return pack("=Hh", self.OP, self.AAAA)
class Instruction21t(Instruction):
    """
    This class represents all instructions which have the 21t format

    Encoding: AA|op BBBB - a register plus a signed 16-bit branch
    offset (if-eqz family).
    """
    def __init__(self, cm, buff):
        super(Instruction21t, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.AA = (word >> 8) & 0xff
        # Signed branch offset in code units.
        self.BBBB = unpack("=h", buff[2:4])[0]

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        return "v%d, %+x" % (self.AA, self.BBBB)

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA), (OPERAND_OFFSET, self.BBBB)]

    def get_ref_off(self):
        return self.BBBB

    def get_raw(self):
        return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction10t(Instruction):
    """
    This class represents all instructions which have the 10t format

    Encoding: AA|op - a signed 8-bit branch offset (goto).
    """
    def __init__(self, cm, buff):
        super(Instruction10t, self).__init__()

        self.OP = unpack("=B", buff[0:1])[0]
        # Signed 8-bit branch offset in code units.
        self.AA = unpack("=b", buff[1:2])[0]

    def get_length(self):
        return 2

    def get_output(self, idx=-1):
        return "%+x" % (self.AA)

    def get_operands(self, idx=-1):
        return [(OPERAND_OFFSET, self.AA)]

    def get_ref_off(self):
        return self.AA

    def get_raw(self):
        return pack("=Bb", self.OP, self.AA)
class Instruction22t(Instruction):
    """
    This class represents all instructions which have the 22t format

    Encoding: B|A|op CCCC - two 4-bit registers plus a signed 16-bit
    branch offset (if-eq family).
    """
    def __init__(self, cm, buff):
        super(Instruction22t, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.A = (word >> 8) & 0xf
        self.B = (word >> 12) & 0xf
        # Signed branch offset in code units.
        self.CCCC = unpack("=h", buff[2:4])[0]

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        return "v%d, v%d, %+x" % (self.A, self.B, self.CCCC)

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.A),
                (OPERAND_REGISTER, self.B),
                (OPERAND_OFFSET, self.CCCC)]

    def get_ref_off(self):
        return self.CCCC

    def get_raw(self):
        return pack("=Hh", (self.B << 12) | (self.A << 8) | self.OP, self.CCCC)
class Instruction22s(Instruction):
    """
    This class represents all instructions which have the 22s format

    Encoding: B|A|op CCCC - two 4-bit registers plus a signed 16-bit
    literal (add-int/lit16 family).
    """
    def __init__(self, cm, buff):
        super(Instruction22s, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.A = (word >> 8) & 0xf
        self.B = (word >> 12) & 0xf
        # Signed 16-bit literal.
        self.CCCC = unpack("=h", buff[2:4])[0]

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        return "v%d, v%d, %d" % (self.A, self.B, self.CCCC)

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.A),
                (OPERAND_REGISTER, self.B),
                (OPERAND_LITERAL, self.CCCC)]

    def get_literals(self):
        return [self.CCCC]

    def get_raw(self):
        return pack("=Hh", (self.B << 12) | (self.A << 8) | self.OP, self.CCCC)
class Instruction22b(Instruction):
    """
    This class represents all instructions which have the 22b format

    Encoding: AA|op CC|BB - two 8-bit registers plus a signed 8-bit
    literal (add-int/lit8 family).
    """
    def __init__(self, cm, buff):
        super(Instruction22b, self).__init__()

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        self.BB = unpack("=B", buff[2:3])[0]
        # CC is the signed 8-bit literal.
        self.CC = unpack("=b", buff[3:4])[0]

        #log_andro.debug("OP:%x %s AA:%x BB:%x CC:%x" % (self.OP, args[0], self.AA, self.BB, self.CC))

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        buff = ""
        buff += "v%d, v%d, %d" % (self.AA, self.BB, self.CC)
        return buff

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AA), (OPERAND_REGISTER, self.BB), (OPERAND_LITERAL, self.CC)]

    def get_literals(self):
        return [self.CC]

    def get_raw(self):
        # A negative CC still ORs into the correct 16-bit pattern: its low
        # 8 bits are clear after the shift, so "=h" packs CC|BB correctly.
        return pack("=Hh", (self.AA << 8) | self.OP, (self.CC << 8) | self.BB)
class Instruction30t(Instruction):
    """
    This class represents all instructions which have the 30t format

    Encoding: 00|op AAAAAAAA - a signed 32-bit branch offset (goto/32).
    """
    def __init__(self, cm, buff):
        super(Instruction30t, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        # Signed 32-bit branch offset in code units.
        self.AAAAAAAA = unpack("=i", buff[2:6])[0]

    def get_length(self):
        return 6

    def get_output(self, idx=-1):
        return "%+x" % (self.AAAAAAAA)

    def get_operands(self, idx=-1):
        return [(OPERAND_OFFSET, self.AAAAAAAA)]

    def get_ref_off(self):
        return self.AAAAAAAA

    def get_raw(self):
        return pack("=Hi", self.OP, self.AAAAAAAA)
class Instruction3rc(Instruction):
    """
    This class represents all instructions which have the 3rc format

    Range format: op {vCCCC .. vNNNN}, kind@BBBB, where AA is the
    register count and NNNN = CCCC + AA - 1 is the *last* register of
    the contiguous range (inclusive).
    """
    def __init__(self, cm, buff):
        super(Instruction3rc, self).__init__()
        self.cm = cm

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        self.BBBB = unpack("=H", buff[2:4])[0]
        self.CCCC = unpack("=H", buff[4:6])[0]

        # Last register of the range vCCCC..vNNNN (inclusive).
        self.NNNN = self.CCCC + self.AA - 1

    def get_length(self):
        return 6

    def get_output(self, idx=-1):
        buff = ""
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        if self.CCCC == self.NNNN:
            buff += "v%d, %s" % (self.CCCC, kind)
        else:
            buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
        return buff

    def get_operands(self, idx=-1):
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        if self.CCCC == self.NNNN:
            return [(OPERAND_REGISTER, self.CCCC), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)]
        else:
            l = []
            # BUGFIX: NNNN is the last register of the range, so iterate
            # through it inclusively; the original range(CCCC, NNNN)
            # dropped the final register even though get_output shows it.
            for i in range(self.CCCC, self.NNNN + 1):
                l.append((OPERAND_REGISTER, i))
            l.append((self.get_kind() + OPERAND_KIND, self.BBBB, kind))
            return l

    def get_ref_kind(self):
        return self.BBBB

    def get_raw(self):
        return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction32x(Instruction):
    """
    This class represents all instructions which have the 32x format

    Encoding: 00|op AAAA BBBB - two 16-bit register operands (move/16).
    """
    def __init__(self, cm, buff):
        super(Instruction32x, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.AAAA = unpack("=H", buff[2:4])[0]
        self.BBBB = unpack("=H", buff[4:6])[0]

    def get_length(self):
        return 6

    def get_output(self, idx=-1):
        return "v%d, v%d" % (self.AAAA, self.BBBB)

    def get_operands(self, idx=-1):
        return [(OPERAND_REGISTER, self.AAAA), (OPERAND_REGISTER, self.BBBB)]

    def get_raw(self):
        return pack("=HHH", self.OP, self.AAAA, self.BBBB)
class Instruction20bc(Instruction):
    """
    This class represents all instructions which have the 20bc format

    Encoding: AA|op BBBB - an error-kind byte plus a 16-bit index
    (throw-verification-error).
    """
    def __init__(self, cm, buff):
        super(Instruction20bc, self).__init__()

        word = unpack("=H", buff[0:2])[0]
        self.OP = word & 0xff
        self.AA = (word >> 8) & 0xff
        self.BBBB = unpack("=H", buff[2:4])[0]

    def get_length(self):
        return 4

    def get_output(self, idx=-1):
        return "%d, %d" % (self.AA, self.BBBB)

    def get_operands(self, idx=-1):
        return [(OPERAND_LITERAL, self.AA), (OPERAND_LITERAL, self.BBBB)]

    def get_raw(self):
        return pack("=HH", (self.AA << 8) | self.OP, self.BBBB)
class Instruction35mi(Instruction):
    """
    This class represents all instructions which have the 35mi format

    Odex inline-invoke variant of 35c: A is the argument count (1..5),
    C..G the argument registers, BBBB the inline-method index.
    """
    def __init__(self, cm, buff):
        super(Instruction35mi, self).__init__()
        self.cm = cm

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.G = (i16 >> 8) & 0xf
        self.A = (i16 >> 12) & 0xf
        self.BBBB = unpack("=H", buff[2:4])[0]

        # Four argument-register nibbles packed into the third word.
        i16 = unpack("=H", buff[4:6])[0]
        self.C = i16 & 0xf
        self.D = (i16 >> 4) & 0xf
        self.E = (i16 >> 8) & 0xf
        self.F = (i16 >> 12) & 0xf

        #log_andro.debug("OP:%x %s G:%x A:%x BBBB:%x C:%x D:%x E:%x F:%x" % (self.OP, args[0], self.G, self.A, self.BBBB, self.C, self.D, self.E, self.F))

    def get_output(self, idx=-1):
        buff = ""
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        # One branch per argument count; no A == 0 case here (unlike the
        # plain 35c handling) - an inline invoke always has a receiver.
        if self.A == 1:
            buff += "v%d, %s" % (self.C, kind)
        elif self.A == 2:
            buff += "v%d, v%d, %s" % (self.C, self.D, kind)
        elif self.A == 3:
            buff += "v%d, v%d, v%d, %s" % (self.C, self.D, self.E, kind)
        elif self.A == 4:
            buff += "v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, kind)
        elif self.A == 5:
            buff += "v%d, v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, self.G, kind)
        return buff

    def get_operands(self, idx=-1):
        l = []
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        if self.A == 1:
            l.extend([(OPERAND_REGISTER, self.C), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        elif self.A == 2:
            l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        elif self.A == 3:
            l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        elif self.A == 4:
            l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        elif self.A == 5:
            l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (OPERAND_REGISTER, self.G), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        return l

    def get_length(self):
        return 6

    def get_ref_kind(self):
        return self.BBBB

    def get_raw(self):
        # Re-encode: A|G|op, BBBB, F|E|D|C.
        return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
class Instruction35ms(Instruction):
    """
    This class represents all instructions which have the 35ms format

    Odex quick-invoke variant of 35c: A is the argument count (1..5),
    C..G the argument registers, BBBB the vtable offset.
    """
    def __init__(self, cm, buff):
        super(Instruction35ms, self).__init__()
        self.cm = cm

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.G = (i16 >> 8) & 0xf
        self.A = (i16 >> 12) & 0xf
        self.BBBB = unpack("=H", buff[2:4])[0]

        # Four argument-register nibbles packed into the third word.
        i16 = unpack("=H", buff[4:6])[0]
        self.C = i16 & 0xf
        self.D = (i16 >> 4) & 0xf
        self.E = (i16 >> 8) & 0xf
        self.F = (i16 >> 12) & 0xf

        #log_andro.debug("OP:%x %s G:%x A:%x BBBB:%x C:%x D:%x E:%x F:%x" % (self.OP, args[0], self.G, self.A, self.BBBB, self.C, self.D, self.E, self.F))

    def get_output(self, idx=-1):
        buff = ""
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        # One branch per argument count; A == 0 never occurs for a quick
        # invoke (the receiver is always present).
        if self.A == 1:
            buff += "v%d, %s" % (self.C, kind)
        elif self.A == 2:
            buff += "v%d, v%d, %s" % (self.C, self.D, kind)
        elif self.A == 3:
            buff += "v%d, v%d, v%d, %s" % (self.C, self.D, self.E, kind)
        elif self.A == 4:
            buff += "v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, kind)
        elif self.A == 5:
            buff += "v%d, v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, self.G, kind)
        return buff

    def get_operands(self, idx=-1):
        l = []
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        if self.A == 1:
            l.extend([(OPERAND_REGISTER, self.C), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        elif self.A == 2:
            l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        elif self.A == 3:
            l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        elif self.A == 4:
            l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        elif self.A == 5:
            l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (OPERAND_REGISTER, self.G), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
        return l

    def get_length(self):
        return 6

    def get_ref_kind(self):
        return self.BBBB

    def get_raw(self):
        # Re-encode: A|G|op, BBBB, F|E|D|C.
        return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
class Instruction3rmi(Instruction):
    """
    This class represents all instructions which have the 3rmi format

    Odex inline-invoke range format: op {vCCCC .. vNNNN}, inline@BBBB,
    with NNNN = CCCC + AA - 1 the *last* register of the range
    (inclusive).
    """
    def __init__(self, cm, buff):
        super(Instruction3rmi, self).__init__()
        self.cm = cm

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        self.BBBB = unpack("=H", buff[2:4])[0]
        self.CCCC = unpack("=H", buff[4:6])[0]

        # Last register of the range vCCCC..vNNNN (inclusive).
        self.NNNN = self.CCCC + self.AA - 1

    def get_length(self):
        return 6

    def get_output(self, idx=-1):
        buff = ""
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        if self.CCCC == self.NNNN:
            buff += "v%d, %s" % (self.CCCC, kind)
        else:
            buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
        return buff

    def get_operands(self, idx=-1):
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        if self.CCCC == self.NNNN:
            return [(OPERAND_REGISTER, self.CCCC), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)]
        else:
            l = []
            # BUGFIX: include the last register NNNN; the original
            # range(CCCC, NNNN) stopped one register short of the range
            # that get_output displays.
            for i in range(self.CCCC, self.NNNN + 1):
                l.append((OPERAND_REGISTER, i))
            l.append((self.get_kind() + OPERAND_KIND, self.BBBB, kind))
            return l

    def get_ref_kind(self):
        return self.BBBB

    def get_raw(self):
        return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction3rms(Instruction):
    """
    This class represents all instructions which have the 3rms format

    Odex quick-invoke range format: op {vCCCC .. vNNNN}, vtable@BBBB,
    with NNNN = CCCC + AA - 1 the *last* register of the range
    (inclusive).
    """
    def __init__(self, cm, buff):
        super(Instruction3rms, self).__init__()
        self.cm = cm

        i16 = unpack("=H", buff[0:2])[0]
        self.OP = i16 & 0xff
        self.AA = (i16 >> 8) & 0xff

        self.BBBB = unpack("=H", buff[2:4])[0]
        self.CCCC = unpack("=H", buff[4:6])[0]

        # Last register of the range vCCCC..vNNNN (inclusive).
        self.NNNN = self.CCCC + self.AA - 1

    def get_length(self):
        return 6

    def get_output(self, idx=-1):
        buff = ""
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        if self.CCCC == self.NNNN:
            buff += "v%d, %s" % (self.CCCC, kind)
        else:
            buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
        return buff

    def get_operands(self, idx=-1):
        kind = get_kind(self.cm, self.get_kind(), self.BBBB)

        if self.CCCC == self.NNNN:
            return [(OPERAND_REGISTER, self.CCCC), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)]
        else:
            l = []
            # BUGFIX: include the last register NNNN; the original
            # range(CCCC, NNNN) stopped one register short of the range
            # that get_output displays.
            for i in range(self.CCCC, self.NNNN + 1):
                l.append((OPERAND_REGISTER, i))
            l.append((self.get_kind() + OPERAND_KIND, self.BBBB, kind))
            return l

    def get_ref_kind(self):
        return self.BBBB

    def get_raw(self):
        return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction41c(Instruction):
    """
    This class represents all instructions which have the 41c format

    Extended (jumbo) format: a 16-bit opcode, a 32-bit constant-pool
    index BBBBBBBB, then a 16-bit register AAAA.
    """
    def __init__(self, cm, buff):
        super(Instruction41c, self).__init__()
        self.cm = cm

        self.OP = unpack("=H", buff[0:2])[0]
        self.BBBBBBBB = unpack("=I", buff[2:6])[0]
        self.AAAA = unpack("=H", buff[6:8])[0]

    def get_length(self):
        return 8

    def get_output(self, idx=-1):
        resolved = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
        return "v%d, %s" % (self.AAAA, resolved)

    def get_operands(self, idx=-1):
        resolved = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
        return [(OPERAND_REGISTER, self.AAAA),
                (self.get_kind() + OPERAND_KIND, self.BBBBBBBB, resolved)]

    def get_ref_kind(self):
        return self.BBBBBBBB

    def get_raw(self):
        return pack("=HIH", self.OP, self.BBBBBBBB, self.AAAA)
class Instruction40sc(Instruction):
    """
    This class represents all instructions which have the 40sc format

    Extended format: a 16-bit opcode, a 32-bit constant-pool index
    BBBBBBBB, then a 16-bit literal AAAA.
    """
    def __init__(self, cm, buff):
        super(Instruction40sc, self).__init__()
        self.cm = cm

        self.OP = unpack("=H", buff[0:2])[0]
        self.BBBBBBBB = unpack("=I", buff[2:6])[0]
        self.AAAA = unpack("=H", buff[6:8])[0]

    def get_length(self):
        return 8

    def get_output(self, idx=-1):
        resolved = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
        return "%d, %s" % (self.AAAA, resolved)

    def get_operands(self, idx=-1):
        resolved = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
        return [(OPERAND_LITERAL, self.AAAA),
                (self.get_kind() + OPERAND_KIND, self.BBBBBBBB, resolved)]

    def get_ref_kind(self):
        return self.BBBBBBBB

    def get_raw(self):
        return pack("=HIH", self.OP, self.BBBBBBBB, self.AAAA)
class Instruction52c(Instruction):
    """
    This class represents all instructions which have the 52c format

    Extended format: a 16-bit opcode, a 32-bit constant-pool index
    CCCCCCCC, then two 16-bit fields AAAA and BBBB.
    """
    def __init__(self, cm, buff):
        super(Instruction52c, self).__init__()
        self.cm = cm

        self.OP = unpack("=H", buff[0:2])[0]
        self.CCCCCCCC = unpack("=I", buff[2:6])[0]
        self.AAAA = unpack("=H", buff[6:8])[0]
        self.BBBB = unpack("=H", buff[8:10])[0]

        #log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBB))

    def get_length(self):
        return 10

    def get_output(self, idx=-1):
        kind = get_kind(self.cm, self.get_kind(), self.CCCCCCCC)
        buff = ""
        buff += "v%d, v%d, %s" % (self.AAAA, self.BBBB, kind)
        return buff

    def get_operands(self, idx=-1):
        kind = get_kind(self.cm, self.get_kind(), self.CCCCCCCC)
        # NOTE(review): get_output renders AAAA/BBBB as registers ("v%d")
        # but they are tagged OPERAND_LITERAL here - confirm which tag
        # downstream consumers expect before changing either side.
        return [(OPERAND_LITERAL, self.AAAA), (OPERAND_LITERAL, self.BBBB), (self.get_kind() + OPERAND_KIND, self.CCCCCCCC, kind)]

    def get_ref_kind(self):
        return self.CCCCCCCC

    def get_raw(self):
        return pack("=HIHH", self.OP, self.CCCCCCCC, self.AAAA, self.BBBB)
class Instruction5rc(Instruction):
    """
    This class represents all instructions which have the 5rc format

    Extended range format: a 16-bit opcode, a 32-bit constant-pool
    index BBBBBBBB, a register count AAAA, and a first register CCCC;
    NNNN = CCCC + AAAA - 1 is the last register of the range
    (inclusive).
    """
    def __init__(self, cm, buff):
        super(Instruction5rc, self).__init__()
        self.cm = cm

        self.OP = unpack("=H", buff[0:2])[0]
        self.BBBBBBBB = unpack("=I", buff[2:6])[0]
        self.AAAA = unpack("=H", buff[6:8])[0]
        self.CCCC = unpack("=H", buff[8:10])[0]

        # Last register of the range vCCCC..vNNNN (inclusive).
        self.NNNN = self.CCCC + self.AAAA - 1

    def get_length(self):
        return 10

    def get_output(self, idx=-1):
        buff = ""
        kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)

        if self.CCCC == self.NNNN:
            buff += "v%d, %s" % (self.CCCC, kind)
        else:
            buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
        return buff

    def get_operands(self, idx=-1):
        kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)

        if self.CCCC == self.NNNN:
            # BUGFIX: the original referenced self.BBBB, which does not
            # exist on this class (the index field is the 32-bit
            # BBBBBBBB), raising AttributeError at runtime.
            return [(OPERAND_REGISTER, self.CCCC), (self.get_kind() + OPERAND_KIND, self.BBBBBBBB, kind)]
        else:
            l = []
            # BUGFIX: iterate through NNNN inclusively (the original
            # range stopped one register short) and reference BBBBBBBB.
            for i in range(self.CCCC, self.NNNN + 1):
                l.append((OPERAND_REGISTER, i))
            l.append((self.get_kind() + OPERAND_KIND, self.BBBBBBBB, kind))
            return l

    def get_ref_kind(self):
        return self.BBBBBBBB

    def get_raw(self):
        return pack("=HIHH", self.OP, self.BBBBBBBB, self.AAAA, self.CCCC)
# Map of each classic (one-byte) Dalvik opcode to [decoder class,
# [mnemonic, optional operand-kind constant]].  Used by get_instruction()
# to pick the right Instruction subclass for a raw opcode byte.
DALVIK_OPCODES_FORMAT = {
    0x00 : [Instruction10x, [ "nop" ] ],
    0x01 : [Instruction12x, [ "move" ] ],
    0x02 : [Instruction22x, [ "move/from16" ] ],
    0x03 : [Instruction32x, [ "move/16" ] ],
    0x04 : [Instruction12x, [ "move-wide" ] ],
    0x05 : [Instruction22x, [ "move-wide/from16" ] ],
    0x06 : [Instruction32x, [ "move-wide/16" ] ],
    0x07 : [Instruction12x, [ "move-object" ] ],
    0x08 : [Instruction22x, [ "move-object/from16" ] ],
    0x09 : [Instruction32x, [ "move-object/16" ] ],
    0x0a : [Instruction11x, [ "move-result" ] ],
    0x0b : [Instruction11x, [ "move-result-wide" ] ],
    0x0c : [Instruction11x, [ "move-result-object" ] ],
    0x0d : [Instruction11x, [ "move-exception" ] ],
    0x0e : [Instruction10x, [ "return-void" ] ],
    0x0f : [Instruction11x, [ "return" ] ],
    0x10 : [Instruction11x, [ "return-wide" ] ],
    0x11 : [Instruction11x, [ "return-object" ] ],
    0x12 : [Instruction11n, [ "const/4" ] ],
    0x13 : [Instruction21s, [ "const/16" ] ],
    0x14 : [Instruction31i, [ "const" ] ],
    0x15 : [Instruction21h, [ "const/high16" ] ],
    0x16 : [Instruction21s, [ "const-wide/16" ] ],
    0x17 : [Instruction31i, [ "const-wide/32" ] ],
    0x18 : [Instruction51l, [ "const-wide" ] ],
    0x19 : [Instruction21h, [ "const-wide/high16" ] ],
    0x1a : [Instruction21c, [ "const-string", KIND_STRING ] ],
    0x1b : [Instruction31c, [ "const-string/jumbo", KIND_STRING ] ],
    0x1c : [Instruction21c, [ "const-class", KIND_TYPE ] ],
    0x1d : [Instruction11x, [ "monitor-enter" ] ],
    0x1e : [Instruction11x, [ "monitor-exit" ] ],
    0x1f : [Instruction21c, [ "check-cast", KIND_TYPE ] ],
    0x20 : [Instruction22c, [ "instance-of", KIND_TYPE ] ],
    # NOTE(review): array-length is a 12x format instruction with no type
    # index, yet KIND_TYPE is listed here -- verify against the decoder.
    0x21 : [Instruction12x, [ "array-length", KIND_TYPE ] ],
    0x22 : [Instruction21c, [ "new-instance", KIND_TYPE ] ],
    0x23 : [Instruction22c, [ "new-array", KIND_TYPE ] ],
    0x24 : [Instruction35c, [ "filled-new-array", KIND_TYPE ] ],
    0x25 : [Instruction3rc, [ "filled-new-array/range", KIND_TYPE ] ],
    0x26 : [Instruction31t, [ "fill-array-data" ] ],
    0x27 : [Instruction11x, [ "throw" ] ],
    0x28 : [Instruction10t, [ "goto" ] ],
    0x29 : [Instruction20t, [ "goto/16" ] ],
    0x2a : [Instruction30t, [ "goto/32" ] ],
    0x2b : [Instruction31t, [ "packed-switch" ] ],
    0x2c : [Instruction31t, [ "sparse-switch" ] ],
    0x2d : [Instruction23x, [ "cmpl-float" ] ],
    0x2e : [Instruction23x, [ "cmpg-float" ] ],
    0x2f : [Instruction23x, [ "cmpl-double" ] ],
    0x30 : [Instruction23x, [ "cmpg-double" ] ],
    0x31 : [Instruction23x, [ "cmp-long" ] ],
    0x32 : [Instruction22t, [ "if-eq" ] ],
    0x33 : [Instruction22t, [ "if-ne" ] ],
    0x34 : [Instruction22t, [ "if-lt" ] ],
    0x35 : [Instruction22t, [ "if-ge" ] ],
    0x36 : [Instruction22t, [ "if-gt" ] ],
    0x37 : [Instruction22t, [ "if-le" ] ],
    0x38 : [Instruction21t, [ "if-eqz" ] ],
    0x39 : [Instruction21t, [ "if-nez" ] ],
    0x3a : [Instruction21t, [ "if-ltz" ] ],
    0x3b : [Instruction21t, [ "if-gez" ] ],
    0x3c : [Instruction21t, [ "if-gtz" ] ],
    0x3d : [Instruction21t, [ "if-lez" ] ],
    # unused opcodes: decoded as nop
    0x3e : [Instruction10x, [ "nop" ] ],
    0x3f : [Instruction10x, [ "nop" ] ],
    0x40 : [Instruction10x, [ "nop" ] ],
    0x41 : [Instruction10x, [ "nop" ] ],
    0x42 : [Instruction10x, [ "nop" ] ],
    0x43 : [Instruction10x, [ "nop" ] ],
    0x44 : [Instruction23x, [ "aget" ] ],
    0x45 : [Instruction23x, [ "aget-wide" ] ],
    0x46 : [Instruction23x, [ "aget-object" ] ],
    0x47 : [Instruction23x, [ "aget-boolean" ] ],
    0x48 : [Instruction23x, [ "aget-byte" ] ],
    0x49 : [Instruction23x, [ "aget-char" ] ],
    0x4a : [Instruction23x, [ "aget-short" ] ],
    0x4b : [Instruction23x, [ "aput" ] ],
    0x4c : [Instruction23x, [ "aput-wide" ] ],
    0x4d : [Instruction23x, [ "aput-object" ] ],
    0x4e : [Instruction23x, [ "aput-boolean" ] ],
    0x4f : [Instruction23x, [ "aput-byte" ] ],
    0x50 : [Instruction23x, [ "aput-char" ] ],
    0x51 : [Instruction23x, [ "aput-short" ] ],
    0x52 : [Instruction22c, [ "iget", KIND_FIELD ] ],
    0x53 : [Instruction22c, [ "iget-wide", KIND_FIELD ] ],
    0x54 : [Instruction22c, [ "iget-object", KIND_FIELD ] ],
    0x55 : [Instruction22c, [ "iget-boolean", KIND_FIELD ] ],
    0x56 : [Instruction22c, [ "iget-byte", KIND_FIELD ] ],
    0x57 : [Instruction22c, [ "iget-char", KIND_FIELD ] ],
    0x58 : [Instruction22c, [ "iget-short", KIND_FIELD ] ],
    0x59 : [Instruction22c, [ "iput", KIND_FIELD ] ],
    0x5a : [Instruction22c, [ "iput-wide", KIND_FIELD ] ],
    0x5b : [Instruction22c, [ "iput-object", KIND_FIELD ] ],
    0x5c : [Instruction22c, [ "iput-boolean", KIND_FIELD ] ],
    0x5d : [Instruction22c, [ "iput-byte", KIND_FIELD ] ],
    0x5e : [Instruction22c, [ "iput-char", KIND_FIELD ] ],
    0x5f : [Instruction22c, [ "iput-short", KIND_FIELD ] ],
    0x60 : [Instruction21c, [ "sget", KIND_FIELD ] ],
    0x61 : [Instruction21c, [ "sget-wide", KIND_FIELD ] ],
    0x62 : [Instruction21c, [ "sget-object", KIND_FIELD ] ],
    0x63 : [Instruction21c, [ "sget-boolean", KIND_FIELD ] ],
    0x64 : [Instruction21c, [ "sget-byte", KIND_FIELD ] ],
    0x65 : [Instruction21c, [ "sget-char", KIND_FIELD ] ],
    0x66 : [Instruction21c, [ "sget-short", KIND_FIELD ] ],
    0x67 : [Instruction21c, [ "sput", KIND_FIELD ] ],
    0x68 : [Instruction21c, [ "sput-wide", KIND_FIELD ] ],
    0x69 : [Instruction21c, [ "sput-object", KIND_FIELD ] ],
    0x6a : [Instruction21c, [ "sput-boolean", KIND_FIELD ] ],
    0x6b : [Instruction21c, [ "sput-byte", KIND_FIELD ] ],
    0x6c : [Instruction21c, [ "sput-char", KIND_FIELD ] ],
    0x6d : [Instruction21c, [ "sput-short", KIND_FIELD ] ],
    0x6e : [Instruction35c, [ "invoke-virtual", KIND_METH ] ],
    0x6f : [Instruction35c, [ "invoke-super", KIND_METH ] ],
    0x70 : [Instruction35c, [ "invoke-direct", KIND_METH ] ],
    0x71 : [Instruction35c, [ "invoke-static", KIND_METH ] ],
    0x72 : [Instruction35c, [ "invoke-interface", KIND_METH ] ],
    # unused
    0x73 : [Instruction10x, [ "nop" ] ],
    0x74 : [Instruction3rc, [ "invoke-virtual/range", KIND_METH ] ],
    0x75 : [Instruction3rc, [ "invoke-super/range", KIND_METH ] ],
    0x76 : [Instruction3rc, [ "invoke-direct/range", KIND_METH ] ],
    0x77 : [Instruction3rc, [ "invoke-static/range", KIND_METH ] ],
    0x78 : [Instruction3rc, [ "invoke-interface/range", KIND_METH ] ],
    # unused
    0x79 : [Instruction10x, [ "nop" ] ],
    0x7a : [Instruction10x, [ "nop" ] ],
    0x7b : [Instruction12x, [ "neg-int" ] ],
    0x7c : [Instruction12x, [ "not-int" ] ],
    0x7d : [Instruction12x, [ "neg-long" ] ],
    0x7e : [Instruction12x, [ "not-long" ] ],
    0x7f : [Instruction12x, [ "neg-float" ] ],
    0x80 : [Instruction12x, [ "neg-double" ] ],
    0x81 : [Instruction12x, [ "int-to-long" ] ],
    0x82 : [Instruction12x, [ "int-to-float" ] ],
    0x83 : [Instruction12x, [ "int-to-double" ] ],
    0x84 : [Instruction12x, [ "long-to-int" ] ],
    0x85 : [Instruction12x, [ "long-to-float" ] ],
    0x86 : [Instruction12x, [ "long-to-double" ] ],
    0x87 : [Instruction12x, [ "float-to-int" ] ],
    0x88 : [Instruction12x, [ "float-to-long" ] ],
    0x89 : [Instruction12x, [ "float-to-double" ] ],
    0x8a : [Instruction12x, [ "double-to-int" ] ],
    0x8b : [Instruction12x, [ "double-to-long" ] ],
    0x8c : [Instruction12x, [ "double-to-float" ] ],
    0x8d : [Instruction12x, [ "int-to-byte" ] ],
    0x8e : [Instruction12x, [ "int-to-char" ] ],
    0x8f : [Instruction12x, [ "int-to-short" ] ],
    0x90 : [Instruction23x, [ "add-int" ] ],
    0x91 : [Instruction23x, [ "sub-int" ] ],
    0x92 : [Instruction23x, [ "mul-int" ] ],
    0x93 : [Instruction23x, [ "div-int" ] ],
    0x94 : [Instruction23x, [ "rem-int" ] ],
    0x95 : [Instruction23x, [ "and-int" ] ],
    0x96 : [Instruction23x, [ "or-int" ] ],
    0x97 : [Instruction23x, [ "xor-int" ] ],
    0x98 : [Instruction23x, [ "shl-int" ] ],
    0x99 : [Instruction23x, [ "shr-int" ] ],
    0x9a : [Instruction23x, [ "ushr-int" ] ],
    0x9b : [Instruction23x, [ "add-long" ] ],
    0x9c : [Instruction23x, [ "sub-long" ] ],
    0x9d : [Instruction23x, [ "mul-long" ] ],
    0x9e : [Instruction23x, [ "div-long" ] ],
    0x9f : [Instruction23x, [ "rem-long" ] ],
    0xa0 : [Instruction23x, [ "and-long" ] ],
    0xa1 : [Instruction23x, [ "or-long" ] ],
    0xa2 : [Instruction23x, [ "xor-long" ] ],
    0xa3 : [Instruction23x, [ "shl-long" ] ],
    0xa4 : [Instruction23x, [ "shr-long" ] ],
    0xa5 : [Instruction23x, [ "ushr-long" ] ],
    0xa6 : [Instruction23x, [ "add-float" ] ],
    0xa7 : [Instruction23x, [ "sub-float" ] ],
    0xa8 : [Instruction23x, [ "mul-float" ] ],
    0xa9 : [Instruction23x, [ "div-float" ] ],
    0xaa : [Instruction23x, [ "rem-float" ] ],
    0xab : [Instruction23x, [ "add-double" ] ],
    0xac : [Instruction23x, [ "sub-double" ] ],
    0xad : [Instruction23x, [ "mul-double" ] ],
    0xae : [Instruction23x, [ "div-double" ] ],
    0xaf : [Instruction23x, [ "rem-double" ] ],
    0xb0 : [Instruction12x, [ "add-int/2addr" ] ],
    0xb1 : [Instruction12x, [ "sub-int/2addr" ] ],
    0xb2 : [Instruction12x, [ "mul-int/2addr" ] ],
    0xb3 : [Instruction12x, [ "div-int/2addr" ] ],
    0xb4 : [Instruction12x, [ "rem-int/2addr" ] ],
    0xb5 : [Instruction12x, [ "and-int/2addr" ] ],
    0xb6 : [Instruction12x, [ "or-int/2addr" ] ],
    0xb7 : [Instruction12x, [ "xor-int/2addr" ] ],
    0xb8 : [Instruction12x, [ "shl-int/2addr" ] ],
    0xb9 : [Instruction12x, [ "shr-int/2addr" ] ],
    0xba : [Instruction12x, [ "ushr-int/2addr" ] ],
    0xbb : [Instruction12x, [ "add-long/2addr" ] ],
    0xbc : [Instruction12x, [ "sub-long/2addr" ] ],
    0xbd : [Instruction12x, [ "mul-long/2addr" ] ],
    0xbe : [Instruction12x, [ "div-long/2addr" ] ],
    0xbf : [Instruction12x, [ "rem-long/2addr" ] ],
    0xc0 : [Instruction12x, [ "and-long/2addr" ] ],
    0xc1 : [Instruction12x, [ "or-long/2addr" ] ],
    0xc2 : [Instruction12x, [ "xor-long/2addr" ] ],
    0xc3 : [Instruction12x, [ "shl-long/2addr" ] ],
    0xc4 : [Instruction12x, [ "shr-long/2addr" ] ],
    0xc5 : [Instruction12x, [ "ushr-long/2addr" ] ],
    0xc6 : [Instruction12x, [ "add-float/2addr" ] ],
    0xc7 : [Instruction12x, [ "sub-float/2addr" ] ],
    0xc8 : [Instruction12x, [ "mul-float/2addr" ] ],
    0xc9 : [Instruction12x, [ "div-float/2addr" ] ],
    0xca : [Instruction12x, [ "rem-float/2addr" ] ],
    0xcb : [Instruction12x, [ "add-double/2addr" ] ],
    0xcc : [Instruction12x, [ "sub-double/2addr" ] ],
    0xcd : [Instruction12x, [ "mul-double/2addr" ] ],
    0xce : [Instruction12x, [ "div-double/2addr" ] ],
    0xcf : [Instruction12x, [ "rem-double/2addr" ] ],
    0xd0 : [Instruction22s, [ "add-int/lit16" ] ],
    0xd1 : [Instruction22s, [ "rsub-int" ] ],
    0xd2 : [Instruction22s, [ "mul-int/lit16" ] ],
    0xd3 : [Instruction22s, [ "div-int/lit16" ] ],
    0xd4 : [Instruction22s, [ "rem-int/lit16" ] ],
    0xd5 : [Instruction22s, [ "and-int/lit16" ] ],
    0xd6 : [Instruction22s, [ "or-int/lit16" ] ],
    0xd7 : [Instruction22s, [ "xor-int/lit16" ] ],
    0xd8 : [Instruction22b, [ "add-int/lit8" ] ],
    0xd9 : [Instruction22b, [ "rsub-int/lit8" ] ],
    0xda : [Instruction22b, [ "mul-int/lit8" ] ],
    0xdb : [Instruction22b, [ "div-int/lit8" ] ],
    0xdc : [Instruction22b, [ "rem-int/lit8" ] ],
    0xdd : [Instruction22b, [ "and-int/lit8" ] ],
    0xde : [Instruction22b, [ "or-int/lit8" ] ],
    0xdf : [Instruction22b, [ "xor-int/lit8" ] ],
    0xe0 : [Instruction22b, [ "shl-int/lit8" ] ],
    0xe1 : [Instruction22b, [ "shr-int/lit8" ] ],
    0xe2 : [Instruction22b, [ "ushr-int/lit8" ] ],
    # expanded opcodes: only valid in odex files (see get_instruction)
    0xe3 : [Instruction22c, [ "iget-volatile", KIND_FIELD ] ],
    0xe4 : [Instruction22c, [ "iput-volatile", KIND_FIELD ] ],
    0xe5 : [Instruction21c, [ "sget-volatile", KIND_FIELD ] ],
    0xe6 : [Instruction21c, [ "sput-volatile", KIND_FIELD ] ],
    0xe7 : [Instruction22c, [ "iget-object-volatile", KIND_FIELD ] ],
    0xe8 : [Instruction22c, [ "iget-wide-volatile", KIND_FIELD ] ],
    0xe9 : [Instruction22c, [ "iput-wide-volatile", KIND_FIELD ] ],
    0xea : [Instruction21c, [ "sget-wide-volatile", KIND_FIELD ] ],
    0xeb : [Instruction21c, [ "sput-wide-volatile", KIND_FIELD ] ],
    0xec : [Instruction10x, [ "breakpoint" ] ],
    0xed : [Instruction20bc, [ "throw-verification-error", VARIES ] ],
    0xee : [Instruction35mi, [ "execute-inline", INLINE_METHOD ] ],
    0xef : [Instruction3rmi, [ "execute-inline/range", INLINE_METHOD ] ],
    0xf0 : [Instruction35c, [ "invoke-object-init/range", KIND_METH ] ],
    0xf1 : [Instruction10x, [ "return-void-barrier" ] ],
    0xf2 : [Instruction22cs, [ "iget-quick", FIELD_OFFSET ] ],
    0xf3 : [Instruction22cs, [ "iget-wide-quick", FIELD_OFFSET ] ],
    0xf4 : [Instruction22cs, [ "iget-object-quick", FIELD_OFFSET ] ],
    0xf5 : [Instruction22cs, [ "iput-quick", FIELD_OFFSET ] ],
    0xf6 : [Instruction22cs, [ "iput-wide-quick", FIELD_OFFSET ] ],
    0xf7 : [Instruction22cs, [ "iput-object-quick", FIELD_OFFSET ] ],
    0xf8 : [Instruction35ms, [ "invoke-virtual-quick", VTABLE_OFFSET ] ],
    0xf9 : [Instruction3rms, [ "invoke-virtual-quick/range", VTABLE_OFFSET ] ],
    0xfa : [Instruction35ms, [ "invoke-super-quick", VTABLE_OFFSET ] ],
    0xfb : [Instruction3rms, [ "invoke-super-quick/range", VTABLE_OFFSET ] ],
    0xfc : [Instruction22c, [ "iput-object-volatile", KIND_FIELD ] ],
    0xfd : [Instruction21c, [ "sget-object-volatile", KIND_FIELD ] ],
    0xfe : [Instruction21c, [ "sput-object-volatile", KIND_FIELD ] ],
}
# Pseudo-opcodes: a 16-bit value whose low byte is 0x00 (nop) and whose
# high byte selects an inline data payload (see LinearSweepAlgorithm).
DALVIK_OPCODES_PAYLOAD = {
    0x0100 : [PackedSwitch],
    0x0200 : [SparseSwitch],
    0x0300 : [FillArrayData],
}
# Table of [class, method, prototype] triples indexed by the inline-method
# index carried by execute-inline / execute-inline/range instructions.
INLINE_METHODS = [
    [ "Lorg/apache/harmony/dalvik/NativeTestTarget;", "emptyInlineMethod", "()V" ],

    [ "Ljava/lang/String;", "charAt", "(I)C" ],
    [ "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I" ],
    [ "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z" ],
    [ "Ljava/lang/String;", "fastIndexOf", "(II)I" ],
    [ "Ljava/lang/String;", "isEmpty", "()Z" ],
    [ "Ljava/lang/String;", "length", "()I" ],

    [ "Ljava/lang/Math;", "abs", "(I)I" ],
    [ "Ljava/lang/Math;", "abs", "(J)J" ],
    [ "Ljava/lang/Math;", "abs", "(F)F" ],
    [ "Ljava/lang/Math;", "abs", "(D)D" ],
    [ "Ljava/lang/Math;", "min", "(II)I" ],
    [ "Ljava/lang/Math;", "max", "(II)I" ],
    [ "Ljava/lang/Math;", "sqrt", "(D)D" ],
    [ "Ljava/lang/Math;", "cos", "(D)D" ],
    [ "Ljava/lang/Math;", "sin", "(D)D" ],

    [ "Ljava/lang/Float;", "floatToIntBits", "(F)I" ],
    [ "Ljava/lang/Float;", "floatToRawIntBits", "(F)I" ],
    [ "Ljava/lang/Float;", "intBitsToFloat", "(I)F" ],

    [ "Ljava/lang/Double;", "doubleToLongBits", "(D)J" ],
    [ "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J" ],
    [ "Ljava/lang/Double;", "longBitsToDouble", "(J)D" ],
]
# Extended-width (jumbo) opcodes: a 16-bit value whose low byte is 0xff.
# Decoded by get_extented_instruction().
DALVIK_OPCODES_EXTENDED_WIDTH = {
    0x00ff: [ Instruction41c, ["const-class/jumbo", KIND_TYPE ] ],
    0x01ff: [ Instruction41c, ["check-cast/jumbo", KIND_TYPE ] ],
    0x02ff: [ Instruction52c, ["instance-of/jumbo", KIND_TYPE ] ],
    0x03ff: [ Instruction41c, ["new-instance/jumbo", KIND_TYPE ] ],
    0x04ff: [ Instruction52c, ["new-array/jumbo", KIND_TYPE ] ],
    0x05ff: [ Instruction5rc, ["filled-new-array/jumbo", KIND_TYPE ] ],
    0x06ff: [ Instruction52c, ["iget/jumbo", KIND_FIELD ] ],
    0x07ff: [ Instruction52c, ["iget-wide/jumbo", KIND_FIELD ] ],
    0x08ff: [ Instruction52c, ["iget-object/jumbo", KIND_FIELD ] ],
    0x09ff: [ Instruction52c, ["iget-boolean/jumbo", KIND_FIELD ] ],
    0x0aff: [ Instruction52c, ["iget-byte/jumbo", KIND_FIELD ] ],
    0x0bff: [ Instruction52c, ["iget-char/jumbo", KIND_FIELD ] ],
    0x0cff: [ Instruction52c, ["iget-short/jumbo", KIND_FIELD ] ],
    0x0dff: [ Instruction52c, ["iput/jumbo", KIND_FIELD ] ],
    0x0eff: [ Instruction52c, ["iput-wide/jumbo", KIND_FIELD ] ],
    0x0fff: [ Instruction52c, ["iput-object/jumbo", KIND_FIELD ] ],
    0x10ff: [ Instruction52c, ["iput-boolean/jumbo", KIND_FIELD ] ],
    0x11ff: [ Instruction52c, ["iput-byte/jumbo", KIND_FIELD ] ],
    0x12ff: [ Instruction52c, ["iput-char/jumbo", KIND_FIELD ] ],
    0x13ff: [ Instruction52c, ["iput-short/jumbo", KIND_FIELD ] ],
    0x14ff: [ Instruction41c, ["sget/jumbo", KIND_FIELD ] ],
    0x15ff: [ Instruction41c, ["sget-wide/jumbo", KIND_FIELD ] ],
    0x16ff: [ Instruction41c, ["sget-object/jumbo", KIND_FIELD ] ],
    0x17ff: [ Instruction41c, ["sget-boolean/jumbo", KIND_FIELD ] ],
    0x18ff: [ Instruction41c, ["sget-byte/jumbo", KIND_FIELD ] ],
    0x19ff: [ Instruction41c, ["sget-char/jumbo", KIND_FIELD ] ],
    0x1aff: [ Instruction41c, ["sget-short/jumbo", KIND_FIELD ] ],
    0x1bff: [ Instruction41c, ["sput/jumbo", KIND_FIELD ] ],
    0x1cff: [ Instruction41c, ["sput-wide/jumbo", KIND_FIELD ] ],
    0x1dff: [ Instruction41c, ["sput-object/jumbo", KIND_FIELD ] ],
    0x1eff: [ Instruction41c, ["sput-boolean/jumbo", KIND_FIELD ] ],
    0x1fff: [ Instruction41c, ["sput-byte/jumbo", KIND_FIELD ] ],
    0x20ff: [ Instruction41c, ["sput-char/jumbo", KIND_FIELD ] ],
    0x21ff: [ Instruction41c, ["sput-short/jumbo", KIND_FIELD ] ],
    0x22ff: [ Instruction5rc, ["invoke-virtual/jumbo", KIND_METH ] ],
    0x23ff: [ Instruction5rc, ["invoke-super/jumbo", KIND_METH ] ],
    0x24ff: [ Instruction5rc, ["invoke-direct/jumbo", KIND_METH ] ],
    0x25ff: [ Instruction5rc, ["invoke-static/jumbo", KIND_METH ] ],
    0x26ff: [ Instruction5rc, ["invoke-interface/jumbo", KIND_METH ] ],
}
# Optimized (odex-only) jumbo opcodes, decoded by get_optimized_instruction()
# only when the file is in odex format.
DALVIK_OPCODES_OPTIMIZED = {
    0xf2ff : [ Instruction5rc, ["invoke-object-init/jumbo", KIND_METH ] ],
    0xf3ff : [ Instruction52c, ["iget-volatile/jumbo", KIND_FIELD ] ],
    0xf4ff : [ Instruction52c, ["iget-wide-volatile/jumbo", KIND_FIELD ] ],
    # NOTE(review): trailing space in the mnemonic below looks like a typo,
    # but is kept byte-identical in case output is compared verbatim.
    0xf5ff : [ Instruction52c, ["iget-object-volatile/jumbo ", KIND_FIELD ] ],
    0xf6ff : [ Instruction52c, ["iput-volatile/jumbo", KIND_FIELD ] ],
    0xf7ff : [ Instruction52c, ["iput-wide-volatile/jumbo", KIND_FIELD ] ],
    0xf8ff : [ Instruction52c, ["iput-object-volatile/jumbo", KIND_FIELD ] ],
    0xf9ff : [ Instruction41c, ["sget-volatile/jumbo", KIND_FIELD ] ],
    0xfaff : [ Instruction41c, ["sget-wide-volatile/jumbo", KIND_FIELD ] ],
    0xfbff : [ Instruction41c, ["sget-object-volatile/jumbo", KIND_FIELD ] ],
    0xfcff : [ Instruction41c, ["sput-volatile/jumbo", KIND_FIELD ] ],
    0xfdff : [ Instruction41c, ["sput-wide-volatile/jumbo", KIND_FIELD ] ],
    0xfeff : [ Instruction41c, ["sput-object-volatile/jumbo", KIND_FIELD ] ],
    0xffff : [ Instruction40sc, ["throw-verification-error/jumbo", VARIES ] ],
}
class Unresolved(Instruction):
    """
    Placeholder emitted by get_instruction() when a raw opcode cannot be
    decoded; it preserves the original bytes so round-tripping still works.
    """
    def __init__(self, cm, data):
        self.cm = cm
        self.data = data

    def get_name(self):
        """Return the fixed pseudo-mnemonic for undecodable bytes."""
        return "unresolved"

    def get_operands(self, idx=-1):
        """Return a single string operand describing the invalid opcode."""
        message = "AG:OP: invalid opcode " + repr(self.data)
        return [(OPERAND_KIND + KIND_STRING, -1, message)]

    def get_op_value(self):
        """Return -1: there is no valid opcode value."""
        return -1

    def get_output(self, idx=-1):
        """Return the raw bytes rendered via repr()."""
        return repr(self.data)

    def get_length(self):
        """Return the number of undecoded bytes."""
        return len(self.data)

    def get_raw(self):
        """Return the original bytes unchanged."""
        return self.data
def get_instruction(cm, op_value, buff, odex=False):
    """
    Decode one classic (one-byte opcode) instruction.

    :param cm: the ClassManager
    :param op_value: the opcode byte (0x00 - 0xff)
    :param buff: raw buffer starting at the instruction
    :param odex: True if the buffer comes from an odex file, enabling
                 the expanded opcode range 0xe3-0xfe
    :rtype: an :class:`Instruction` object (InstructionInvalid for opcodes
            not valid in this context, Unresolved if decoding raised)
    """
    try:
        # 0xe3..0xfe are expanded opcodes, only meaningful inside odex files
        if not odex and 0xe3 <= op_value <= 0xfe:
            return InstructionInvalid(cm, buff)
        try:
            return DALVIK_OPCODES_FORMAT[op_value][0](cm, buff)
        except KeyError:
            return InstructionInvalid(cm, buff)
    # narrowed from a bare except: a bare clause also swallowed
    # SystemExit/KeyboardInterrupt; any decoding failure still yields
    # an Unresolved placeholder instead of propagating.
    except Exception:
        return Unresolved(cm, buff)
def get_extented_instruction(cm, op_value, buff):
    """
    Decode an extended-width (jumbo, low byte 0xff) instruction.

    NOTE(review): the historical 'extented' spelling is kept because
    callers reference this name.
    """
    decoder = DALVIK_OPCODES_EXTENDED_WIDTH[op_value][0]
    return decoder(cm, buff)
def get_optimized_instruction(cm, op_value, buff):
    """Decode an optimized (odex-only) jumbo instruction."""
    decoder = DALVIK_OPCODES_OPTIMIZED[op_value][0]
    return decoder(cm, buff)
def get_instruction_payload(op_value, buff):
    """Decode a payload pseudo-instruction (packed/sparse switch, fill-array data)."""
    decoder = DALVIK_OPCODES_PAYLOAD[op_value][0]
    return decoder(buff)
class LinearSweepAlgorithm :
    """
    This class is used to disassemble a method. The algorithm used by this class is linear sweep.
    """
    def get_instructions(self, cm, size, insn, idx):
        """
        Disassemble the buffer and yield one Instruction per decoded opcode.

        :param cm: a ClassManager object
        :type cm: :class:`ClassManager` object
        :param size: the total size of the buffer
        :type size: int
        :param insn: a raw buffer where are the instructions
        :type insn: string
        :param idx: a start address in the buffer
        :type idx: int

        :rtype: a generator of :class:`Instruction` objects
        """
        self.odex = cm.get_odex_format()

        # size is in 16-bit code units; clamp to the actual buffer length
        max_idx = size * calcsize('=H')
        if max_idx > len(insn):
            max_idx = len(insn)

        # Get instructions
        while idx < max_idx:
            obj = None
            classic_instruction = True

            op_value = unpack('=B', insn[idx])[0]

            #print "%x %x" % (op_value, idx)

            # payload instructions or extented/optimized instructions:
            # these are identified by a full 16-bit opcode whose low byte
            # is 0x00 (payload) or 0xff (jumbo)
            if (op_value == 0x00 or op_value == 0xff) and ((idx + 2) < max_idx):
                op_value = unpack('=H', insn[idx:idx + 2])[0]

                # payload instructions ?
                if op_value in DALVIK_OPCODES_PAYLOAD:
                    try:
                        obj = get_instruction_payload(op_value, insn[idx:])
                        classic_instruction = False
                    except struct.error:
                        warning("error while decoding instruction ...")

                elif op_value in DALVIK_OPCODES_EXTENDED_WIDTH:
                    try:
                        obj = get_extented_instruction(cm, op_value, insn[idx:])
                        classic_instruction = False
                    except struct.error, why:
                        warning("error while decoding instruction ..." + why.__str__())

                # optimized instructions ? (only present in odex files)
                elif self.odex and (op_value in DALVIK_OPCODES_OPTIMIZED):
                    obj = get_optimized_instruction(cm, op_value, insn[idx:])
                    classic_instruction = False

            # classical instructions: fall back to the one-byte opcode
            if classic_instruction:
                op_value = unpack('=B', insn[idx])[0]
                obj = get_instruction(cm, op_value, insn[idx:], self.odex)

            # emit instruction, then advance by its decoded byte length
            yield obj
            idx = idx + obj.get_length()
class DCode:
    """
    This class represents the instructions of a method

    :param class_manager: the ClassManager
    :type class_manager: :class:`ClassManager` object
    :param offset: the offset of the buffer
    :type offset: int
    :param size: the total size of the buffer
    :type size: int
    :param buff: a raw buffer where are the instructions
    :type buff: string
    """
    def __init__(self, class_manager, offset, size, buff):
        self.CM = class_manager
        self.insn = buff
        self.offset = offset
        self.size = size

        # idx -> list of messages attached to an instruction (add_inote)
        self.notes = {}
        self.cached_instructions = []
        # counts disassembly requests; after 5 requests (or immediately for
        # large methods) the decoded instructions are cached
        self.rcache = 0

        self.idx = 0

    def get_insn(self):
        """
        Get the insn buffer

        :rtype: string
        """
        return self.insn

    def set_insn(self, insn):
        """
        Set a new raw buffer to disassemble

        :param insn: the buffer
        :type insn: string
        """
        self.insn = insn
        self.size = len(self.insn)

    def set_idx(self, idx):
        """
        Set the start address of the buffer

        :param idx: the index
        :type idx: int
        """
        self.idx = idx

    def set_instructions(self, instructions):
        """
        Set the instructions

        :param instructions: the list of instructions
        :type instructions: a list of :class:`Instruction`
        """
        self.cached_instructions = instructions

    def get_instructions(self):
        """
        Get the instructions

        :rtype: a generator of each :class:`Instruction` (or a cached list of instructions if you have setup instructions)
        """
        # it is possible to use a cache for instructions (avoids a new disasm)
        if self.cached_instructions:
            for i in self.cached_instructions:
                yield i
        else:
            # after enough repeated requests, disassemble once and cache
            if self.rcache >= 5:
                lsa = LinearSweepAlgorithm()
                for i in lsa.get_instructions(self.CM, self.size, self.insn, self.idx):
                    self.cached_instructions.append(i)

                for i in self.cached_instructions:
                    yield i
            else:
                self.rcache += 1
                # big methods are cached on the next request regardless
                if self.size >= 1000:
                    self.rcache = 5

                lsa = LinearSweepAlgorithm()
                for i in lsa.get_instructions(self.CM, self.size, self.insn, self.idx):
                    yield i

    def reload(self):
        # nothing to refresh for raw code
        pass

    def add_inote(self, msg, idx, off=None):
        """
        Add a message to a specific instruction by using (default) the index of the address if specified

        :param msg: the message
        :type msg: string
        :param idx: index of the instruction (the position in the list of the instruction)
        :type idx: int
        :param off: address of the instruction
        :type off: int
        """
        if off != None:
            idx = self.off_to_pos(off)

        if idx not in self.notes:
            self.notes[idx] = []

        self.notes[idx].append(msg)

    def get_instruction(self, idx, off=None):
        """
        Get a particular instruction by using (default) the index of the address if specified

        :param idx: index of the instruction (the position in the list of the instruction)
        :type idx: int
        :param off: address of the instruction
        :type off: int

        :rtype: an :class:`Instruction` object
        """
        if off != None:
            idx = self.off_to_pos(off)
        # materializes the whole instruction list to index into it
        return [i for i in self.get_instructions()][idx]

    def off_to_pos(self, off):
        """
        Get the position of an instruction by using the address

        :param off: address of the instruction
        :type off: int

        :rtype: int (-1 if no instruction starts at that address)
        """
        idx = 0
        nb = 0
        for i in self.get_instructions():
            if idx == off:
                return nb
            nb += 1
            idx += i.get_length()
        return -1

    def get_ins_off(self, off):
        """
        Get a particular instruction by using the address

        :param off: address of the instruction
        :type off: int

        :rtype: an :class:`Instruction` object (None if no instruction starts there)
        """
        idx = 0
        for i in self.get_instructions():
            if idx == off:
                return i
            idx += i.get_length()
        return None

    def show(self):
        """
        Display this object
        """
        nb = 0
        idx = 0
        for i in self.get_instructions():
            print "%-8d(%08x)" % (nb, idx),
            i.show(nb)
            print

            idx += i.get_length()
            nb += 1

    def pretty_show(self, m_a):
        """
        Display (with a pretty print) this object

        :param m_a: :class:`MethodAnalysis` object
        """
        bytecode.PrettyShow(m_a, m_a.basic_blocks.gets(), self.notes)
        bytecode.PrettyShowEx(m_a.exceptions.gets())

    def get_raw(self):
        """
        Return the raw buffer of this object

        :rtype: string
        """
        return ''.join(i.get_raw() for i in self.get_instructions())

    def get_length(self):
        """
        Return the length of this object (in bytes)

        :rtype: int
        """
        return len(self.get_raw())
class TryItem:
"""
This class represents the try_item format
:param buff: a raw buffer where are the try_item format
:type buff: string
:param cm: the ClassManager
:type cm: :class:`ClassManager` object
"""
def __init__(self, buff, cm) :
self.offset = buff.get_idx()
self.__CM = cm
self.start_addr = unpack("=I", buff.read(4))[0]
self.insn_count = unpack("=H", buff.read(2))[0]
self.handler_off = unpack("=H", buff.read(2))[0]
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def get_start_addr(self) :
"""
Get the start address of the block of code covered by this entry. The address is a count of 16-bit code units to the start of the first covered instruction.
:rtype: int
"""
return self.start_addr
def get_insn_count(self) :
"""
Get the number of 16-bit code units covered by this entry
:rtype: int
"""
return self.insn_count
def get_handler_off(self) :
"""
Get the offset in bytes from the start of the associated :class:`EncodedCatchHandlerList` to the :class:`EncodedCatchHandler` for this entry.
:rtype: int
"""
return self.handler_off
def get_raw(self) :
return pack("=I", self.start_addr) + pack("=H", self.insn_count) + pack("=H", self.handler_off)
def get_length(self) :
return len(self.get_raw())
class DalvikCode:
    """
    This class represents the instructions of a method

    :param buff: a raw buffer where are the instructions
    :type buff: string
    :param cm: the ClassManager
    :type cm: :class:`ClassManager` object
    """
    def __init__(self, buff, cm):
        self.__CM = cm
        self.offset = buff.get_idx()

        # code_item structures are 4-byte aligned; record the padding
        # consumed so get_raw()/get_size() stay consistent
        self.int_padding = ""
        off = buff.get_idx()
        while off % 4 != 0:
            self.int_padding += '\00'
            off += 1
        buff.set_idx(off)

        self.__off = buff.get_idx()

        self.registers_size = unpack("=H", buff.read(2))[0]
        self.ins_size = unpack("=H", buff.read(2))[0]
        self.outs_size = unpack("=H", buff.read(2))[0]
        self.tries_size = unpack("=H", buff.read(2))[0]
        self.debug_info_off = unpack("=I", buff.read(4))[0]
        self.insns_size = unpack("=I", buff.read(4))[0]

        # insns_size counts 16-bit code units, so read insns_size * 2 bytes
        ushort = calcsize('=H')
        self.code = DCode(self.__CM, buff.get_idx(), self.insns_size, buff.read(self.insns_size * ushort))

        # odd number of code units: a 16-bit pad precedes the tries
        if (self.insns_size % 2 == 1):
            self.padding = unpack("=H", buff.read(2))[0]

        self.tries = []
        self.handlers = None
        if self.tries_size > 0:
            for i in xrange(0, self.tries_size):
                self.tries.append(TryItem(buff, self.__CM))

            self.handlers = EncodedCatchHandlerList(buff, self.__CM)

    def get_registers_size(self):
        """
        Get the number of registers used by this code

        :rtype: int
        """
        return self.registers_size

    def get_ins_size(self):
        """
        Get the number of words of incoming arguments to the method that this code is for

        :rtype: int
        """
        return self.ins_size

    def get_outs_size(self):
        """
        Get the number of words of outgoing argument space required by this code for method invocation

        :rtype: int
        """
        return self.outs_size

    def get_tries_size(self):
        """
        Get the number of :class:`TryItem` for this instance

        :rtype: int
        """
        return self.tries_size

    def get_debug_info_off(self):
        """
        Get the offset from the start of the file to the debug info (line numbers + local variable info) sequence for this code, or 0 if there simply is no information

        :rtype: int
        """
        return self.debug_info_off

    def get_insns_size(self):
        """
        Get the size of the instructions list, in 16-bit code units

        :rtype: int
        """
        return self.insns_size

    def get_handlers(self):
        """
        Get the bytes representing a list of lists of catch types and associated handler addresses.

        :rtype: :class:`EncodedCatchHandlerList`
        """
        return self.handlers

    def get_tries(self):
        """
        Get the array indicating where in the code exceptions are caught and how to handle them

        :rtype: a list of :class:`TryItem` objects
        """
        return self.tries

    def get_debug(self):
        """
        Return the associated debug object

        :rtype: :class:`DebugInfoItem`
        """
        return self.__CM.get_debug_off(self.debug_info_off)

    def get_bc(self):
        """
        Return the associated code object

        :rtype: :class:`DCode`
        """
        return self.code

    def set_idx(self, idx):
        self.code.set_idx(idx)

    def reload(self):
        self.code.reload()

    def get_length(self):
        # NOTE: returns the insns_size field (16-bit code units), not a
        # byte length like DCode.get_length / get_size below
        return self.insns_size

    def _begin_show(self):
        debug("registers_size: %d" % self.registers_size)
        debug("ins_size: %d" % self.ins_size)
        debug("outs_size: %d" % self.outs_size)
        debug("tries_size: %d" % self.tries_size)
        debug("debug_info_off: %d" % self.debug_info_off)
        debug("insns_size: %d" % self.insns_size)

        bytecode._PrintBanner()

    def show(self):
        self._begin_show()
        self.code.show()
        self._end_show()

    def _end_show(self):
        bytecode._PrintBanner()

    def pretty_show(self, m_a):
        self._begin_show()
        self.code.pretty_show(m_a)
        self._end_show()

    def get_obj(self):
        return [self.code, self.tries, self.handlers]

    def get_raw(self):
        # re-serialize; insns_size is recomputed from the (possibly
        # modified) code buffer (Python 2 integer division)
        code_raw = self.code.get_raw()
        self.insns_size = (len(code_raw) / 2) + (len(code_raw) % 2)

        buff = self.int_padding
        buff += pack("=H", self.registers_size) + \
                pack("=H", self.ins_size) + \
                pack("=H", self.outs_size) + \
                pack("=H", self.tries_size) + \
                pack("=I", self.debug_info_off) + \
                pack("=I", self.insns_size) + \
                code_raw

        # NOTE(review): the odd-size padding word read in __init__ is not
        # re-emitted here (kept disabled upstream) -- verify round-trip
        # if (self.insns_size % 2 == 1):
        #     buff += pack("=H", self.padding)

        if self.tries_size > 0:
            buff += ''.join(i.get_raw() for i in self.tries)
            buff += self.handlers.get_raw()

        return buff

    def add_inote(self, msg, idx, off=None):
        """
        Add a message to a specific instruction by using (default) the index of the address if specified

        :param msg: the message
        :type msg: string
        :param idx: index of the instruction (the position in the list of the instruction)
        :type idx: int
        :param off: address of the instruction
        :type off: int
        """
        if self.code:
            return self.code.add_inote(msg, idx, off)

    def get_instruction(self, idx, off=None):
        if self.code:
            return self.code.get_instruction(idx, off)

    def get_size(self):
        # total byte size of this code_item, including alignment padding
        length = len(self.int_padding)

        length += len( pack("=H", self.registers_size) + \
                       pack("=H", self.ins_size) + \
                       pack("=H", self.outs_size) + \
                       pack("=H", self.tries_size) + \
                       pack("=I", self.debug_info_off) + \
                       pack("=I", self.insns_size) )
        length += self.code.get_length()

        if (self.insns_size % 2 == 1) :
            length += len(pack("=H", self.padding))

        if self.tries_size > 0 :
            for i in self.tries :
                length += i.get_length()
            length += self.handlers.get_length()

        return length

    def get_off(self) :
        return self.__off
class CodeItem :
    """
    Container for all code_item structures of a dex file: parses `size`
    consecutive DalvikCode objects and indexes them by their file offset.
    """
    def __init__(self, size, buff, cm) :
        self.__CM = cm

        self.offset = buff.get_idx()

        self.code = []
        # offset -> DalvikCode, for O(1) lookup in get_code()
        self.__code_off = {}

        for i in xrange(0, size) :
            x = DalvikCode( buff, cm )
            self.code.append( x )
            self.__code_off[ x.get_off() ] = x

    def set_off(self, off) :
        self.offset = off

    def get_off(self) :
        return self.offset

    def get_code(self, off) :
        # returns None when no code_item starts at this offset
        try :
            return self.__code_off[off]
        except KeyError :
            return None

    def reload(self) :
        for i in self.code :
            i.reload()

    def show(self) :
        print "CODE_ITEM"
        for i in self.code :
            i.show()

    def get_obj(self) :
        return [ i for i in self.code ]

    def get_raw(self) :
        return ''.join(i.get_raw() for i in self.code)

    def get_length(self) :
        length = 0
        for i in self.code :
            length += i.get_size()
        return length
class MapItem :
    """One entry of the dex map list: a typed, sized pointer to a section.

    Parsing the header immediately jumps to the entry's payload offset and
    loads the referenced items, either eagerly (``next``) or lazily
    (``next_lazy``) depending on the ClassManager configuration.  The
    caller (MapList) is responsible for restoring the buffer position
    after construction.
    """
    def __init__(self, buff, cm) :
        self.__CM = cm
        # Offset of this map_item structure itself (not of its payload).
        self.off = buff.get_idx()
        self.type = unpack("=H", buff.read(2))[0]
        self.unused = unpack("=H", buff.read(2))[0]
        self.size = unpack("=I", buff.read(4))[0]
        self.offset = unpack("=I", buff.read(4))[0]
        self.item = None
        # Jump to the payload and parse it in place.
        buff.set_idx( self.offset )
        lazy_analysis = self.__CM.get_lazy_analysis()
        if lazy_analysis :
            self.next_lazy(buff, cm)
        else :
            self.next(buff, cm)

    def get_off(self) :
        """Return the offset of this map_item structure."""
        return self.off

    def get_offset(self) :
        """Return the offset of the payload this entry points to."""
        return self.offset

    def get_type(self) :
        """Return the numeric TYPE_MAP_ITEM code of this entry."""
        return self.type

    def get_size(self) :
        """Return the number of items in the payload."""
        return self.size

    def next(self, buff, cm):
        # Eager parsing: build the concrete item objects now.  Multi-item
        # sections consume `self.size` consecutive structures from buff.
        debug("%s @ 0x%x(%d) %x %x" % (TYPE_MAP_ITEM[self.type], buff.get_idx(), buff.get_idx(), self.size, self.offset))

        if TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_ID_ITEM" :
            self.item = [ StringIdItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CODE_ITEM" :
            self.item = CodeItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_ID_ITEM" :
            self.item = TypeHIdItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_PROTO_ID_ITEM" :
            self.item = ProtoHIdItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_FIELD_ID_ITEM" :
            self.item = FieldHIdItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_METHOD_ID_ITEM" :
            self.item = MethodHIdItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DEF_ITEM" :
            self.item = ClassHDefItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_HEADER_ITEM" :
            self.item = HeaderItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_ITEM" :
            self.item = [ AnnotationItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_SET_ITEM" :
            self.item = [ AnnotationSetItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATIONS_DIRECTORY_ITEM" :
            self.item = [ AnnotationsDirectoryItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_SET_REF_LIST" :
            self.item = [ AnnotationSetRefList( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_LIST" :
            self.item = [ TypeList( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_DATA_ITEM" :
            self.item = [ StringDataItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_DEBUG_INFO_ITEM" :
            self.item = DebugInfoItemEmpty( buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ENCODED_ARRAY_ITEM" :
            self.item = [ EncodedArrayItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DATA_ITEM" :
            self.item = [ ClassDataItem(buff, cm) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_MAP_LIST" :
            # The map list entry points at the map list itself; MapList
            # fills this in afterwards via set_item().
            pass # It's me I think !!!

        else :
            bytecode.Exit( "Map item %d @ 0x%x(%d) is unknown" % (self.type, buff.get_idx(), buff.get_idx()) )

    def next_lazy(self, buff, cm) :
        # Lazy parsing: only a subset of sections is materialized, and the
        # id sections use the plain (non-"H") item variants.  Unknown types
        # are silently ignored here (no bytecode.Exit branch).
        if TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_ID_ITEM" :
            self.item = [ StringIdItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CODE_ITEM" :
            self.item = CodeItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_ID_ITEM" :
            self.item = TypeIdItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_PROTO_ID_ITEM" :
            self.item = ProtoIdItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_FIELD_ID_ITEM" :
            self.item = FieldIdItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_METHOD_ID_ITEM" :
            self.item = MethodIdItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DEF_ITEM" :
            self.item = ClassDefItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_HEADER_ITEM" :
            self.item = HeaderItem( self.size, buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_LIST" :
            self.item = [ TypeList( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_DATA_ITEM" :
            self.item = [ StringDataItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_DEBUG_INFO_ITEM" :
            self.item = DebugInfoItemEmpty( buff, cm )

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ENCODED_ARRAY_ITEM" :
            self.item = [ EncodedArrayItem( buff, cm ) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DATA_ITEM" :
            self.item = [ ClassDataItem(buff, cm) for i in xrange(0, self.size) ]

        elif TYPE_MAP_ITEM[ self.type ] == "TYPE_MAP_LIST" :
            pass # It's me I think !!!

    def reload(self) :
        # Re-resolve indices after a mutation (e.g. string hooks).
        if self.item != None :
            if isinstance( self.item, list ):
                for i in self.item :
                    i.reload()
            else :
                self.item.reload()

    def show(self) :
        bytecode._Print( "\tMAP_TYPE_ITEM", TYPE_MAP_ITEM[ self.type ])

        if self.item != None :
            if isinstance( self.item, list ):
                for i in self.item :
                    i.show()
            else :
                self.item.show()

    def pretty_show(self) :
        bytecode._Print( "\tMAP_TYPE_ITEM", TYPE_MAP_ITEM[ self.type ])

        if self.item != None :
            if isinstance( self.item, list ):
                for i in self.item :
                    # Only ClassDataItem has a dedicated pretty printer.
                    if isinstance(i, ClassDataItem) :
                        i.pretty_show()
                    else :
                        i.show()
            else :
                # NOTE(review): falls back to show() even for a single
                # item -- confirm whether pretty_show() was intended here.
                self.item.show()

    def get_obj(self) :
        return self.item

    def get_raw(self) :
        # Refresh the payload offset from the (possibly relocated) item
        # before serializing the 12-byte map_item header.
        if isinstance(self.item, list) :
            self.offset = self.item[0].get_off()
        else :
            self.offset = self.item.get_off()

        return pack("=H", self.type) + pack("=H", self.unused) + pack("=I", self.size) + pack("=I", self.offset)

    def get_length(self) :
        """Return the fixed serialized size of a map_item header (12 bytes)."""
        return calcsize( "=HHII" )

    def get_item(self) :
        return self.item

    def set_item(self, item) :
        self.item = item
class OffObj:
    """Minimal wrapper object holding a single offset value."""
    def __init__(self, o):
        # The wrapped offset.
        self.off = o
class ClassManager:
    """
    This class is used to access to all elements (strings, type, proto ...) of the dex format
    """
    def __init__(self, vm, config):
        """
        :param vm: the parent DalvikVMFormat object (may be None while the
                   header is being parsed)
        :param config: dict providing RECODE_ASCII_STRING,
                       RECODE_ASCII_STRING_METH and LAZY_ANALYSIS
        """
        self.vm = vm
        self.buff = vm

        self.decompiler_ob = None
        self.vmanalysis_ob = None
        self.gvmanalysis_ob = None

        # Per-section indexes filled in by add_type_item().
        self.__manage_item = {}
        self.__manage_item_off = []
        self.__strings_off = {}
        self.__obj_offset = {}
        self.__item_offset = {}

        self.__cached_type_list = {}
        self.__cached_proto = {}

        self.recode_ascii_string = config["RECODE_ASCII_STRING"]
        self.recode_ascii_string_meth = None
        if config["RECODE_ASCII_STRING_METH"]:
            self.recode_ascii_string_meth = config["RECODE_ASCII_STRING_METH"]

        self.lazy_analysis = config["LAZY_ANALYSIS"]

        self.hook_strings = {}

        self.engine = []
        self.engine.append("python")

        # FIX: always define odex_format.  Previously the attribute was only
        # assigned when vm was not None, so get_odex_format() raised an
        # AttributeError on a ClassManager built without a vm (which happens
        # while parsing the header, see DalvikVMFormat._load).
        self.odex_format = False
        if self.vm != None:
            self.odex_format = self.vm.get_format_type() == "ODEX"

    def get_ascii_string(self, s):
        """Decode *s* as ASCII; non-ASCII bytes are replaced by their hex value."""
        try:
            return s.decode("ascii")
        except UnicodeDecodeError:
            # Build via a list + join instead of quadratic string +=.
            d = []
            for i in s:
                if ord(i) < 128:
                    d.append(i)
                else:
                    d.append("%x" % ord(i))
            return ''.join(d)

    def get_odex_format(self):
        """Return True when the underlying vm is in ODEX format."""
        return self.odex_format

    def get_obj_by_offset(self, offset) :
        """Return the raw parsed object at *offset* (raises KeyError if absent)."""
        return self.__obj_offset[ offset ]

    def get_item_by_offset(self, offset) :
        """Return the section item at *offset* (raises KeyError if absent)."""
        return self.__item_offset[ offset ]

    def get_string_by_offset(self, offset) :
        """Return the string data item at *offset* (raises KeyError if absent)."""
        return self.__strings_off[ offset ]

    def get_lazy_analysis(self) :
        return self.lazy_analysis

    def get_vmanalysis(self) :
        return self.vmanalysis_ob

    def set_vmanalysis(self, vmanalysis) :
        self.vmanalysis_ob = vmanalysis

    def get_gvmanalysis(self) :
        return self.gvmanalysis_ob

    def set_gvmanalysis(self, gvmanalysis) :
        self.gvmanalysis_ob = gvmanalysis

    def set_decompiler(self, decompiler) :
        self.decompiler_ob = decompiler

    def get_engine(self) :
        return self.engine[0]

    def get_all_engine(self) :
        return self.engine

    def add_type_item(self, type_item, c_item, item) :
        """Index one parsed map-list section by type name and by offsets.

        :param type_item: TYPE_MAP_ITEM name (e.g. "TYPE_STRING_DATA_ITEM")
        :param c_item: the MapItem header object
        :param item: the parsed payload (single object or list)
        """
        self.__manage_item[ type_item ] = item

        self.__obj_offset[ c_item.get_off() ] = c_item
        self.__item_offset[ c_item.get_offset() ] = item

        sdi = False
        if type_item == "TYPE_STRING_DATA_ITEM" :
            sdi = True

        if item != None :
            if isinstance(item, list) :
                for i in item :
                    goff = i.offset
                    self.__manage_item_off.append( goff )

                    self.__obj_offset[ i.get_off() ] = i

                    if sdi == True :
                        # Strings also get a dedicated offset index.
                        self.__strings_off[ goff ] = i
            else :
                self.__manage_item_off.append( c_item.get_offset() )

    def get_code(self, idx) :
        """Return the DalvikCode at offset *idx*, or None when no code section exists."""
        try :
            return self.__manage_item[ "TYPE_CODE_ITEM" ].get_code( idx )
        except KeyError :
            return None

    def get_class_data_item(self, off) :
        """Return the ClassDataItem at *off*; exits on an unknown offset."""
        for i in self.__manage_item[ "TYPE_CLASS_DATA_ITEM" ] :
            if i.get_off() == off :
                return i

        bytecode.Exit( "unknown class data item @ 0x%x" % off )

    def get_encoded_array_item(self, off) :
        """Return the EncodedArrayItem at *off*, or None when not found."""
        for i in self.__manage_item["TYPE_ENCODED_ARRAY_ITEM" ] :
            if i.get_off() == off :
                return i

    def get_string(self, idx) :
        """Return the (possibly recoded/hooked) string at index *idx*."""
        # Hooked strings take precedence over the parsed string table.
        if idx in self.hook_strings :
            return self.hook_strings[ idx ]

        try :
            off = self.__manage_item[ "TYPE_STRING_ID_ITEM" ][idx].get_string_data_off()
        except IndexError :
            bytecode.Warning( "unknown string item @ %d" % (idx) )
            return "AG:IS: invalid string"

        try:
            if self.recode_ascii_string:
                if self.recode_ascii_string_meth:
                    return self.recode_ascii_string_meth(self.__strings_off[off].get())
                return self.get_ascii_string(self.__strings_off[off].get())
            return self.__strings_off[off].get()
        except KeyError:
            bytecode.Warning( "unknown string item @ 0x%x(%d)" % (off,idx) )
            return "AG:IS: invalid string"

    def get_raw_string(self, idx) :
        """Return the raw string at index *idx*, bypassing hooks and recoding."""
        try :
            off = self.__manage_item[ "TYPE_STRING_ID_ITEM" ][idx].get_string_data_off()
        except IndexError :
            bytecode.Warning( "unknown string item @ %d" % (idx) )
            return "AG:IS: invalid string"

        try :
            return self.__strings_off[off].get()
        except KeyError :
            bytecode.Warning( "unknown string item @ 0x%x(%d)" % (off,idx) )
            return "AG:IS: invalid string"

    def get_type_list(self, off) :
        """Return the "(...)" rendering of the type list at *off* (cached)."""
        if off == 0 :
            return "()"

        if off in self.__cached_type_list :
            return self.__cached_type_list[ off ]

        for i in self.__manage_item[ "TYPE_TYPE_LIST" ] :
            if i.get_type_list_off() == off :
                ret =  "(" + i.get_string() + ")"
                self.__cached_type_list[ off ] = ret
                return ret

        return None

    def get_type(self, idx) :
        """Return the type descriptor string for type index *idx*."""
        _type = self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( idx )
        if _type == -1 :
            return "AG:ITI: invalid type"
        return self.get_string( _type )

    def get_type_ref(self, idx) :
        return self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( idx )

    def get_proto(self, idx) :
        """Return [parameters, return type] for proto index *idx* (cached)."""
        try :
            proto = self.__cached_proto[ idx ]
        except KeyError :
            proto = self.__manage_item[ "TYPE_PROTO_ID_ITEM" ].get( idx )
            self.__cached_proto[ idx ] = proto

        return [ proto.get_parameters_off_value(), proto.get_return_type_idx_value() ]

    def get_field(self, idx) :
        """Return [class name, type, name] for field index *idx*."""
        field = self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( idx )
        return [ field.get_class_name(), field.get_type(), field.get_name() ]

    def get_field_ref(self, idx) :
        return self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( idx )

    def get_method(self, idx) :
        """Return the [class, proto, name] list for method index *idx*."""
        method = self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( idx )
        return method.get_list()

    def get_method_ref(self, idx) :
        return self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( idx )

    def set_hook_class_name(self, class_def, value) :
        """Rename a class by hooking its name string and refreshing exports."""
        _type = self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( class_def.get_class_idx() )
        self.set_hook_string( _type, value )

        self.vm._delete_python_export_class( class_def )

        class_def.reload()

        # FIXME
        self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].reload()

        for i in class_def.get_methods() :
            i.reload()

        for i in class_def.get_fields() :
            i.reload()

        self.vm._create_python_export_class( class_def )

    def set_hook_method_name(self, encoded_method, value):
        """Rename a method by hooking its name string and refreshing the python export."""
        python_export = True

        method = self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( encoded_method.get_method_idx() )
        self.set_hook_string( method.get_name_idx(), value )

        class_def = self.__manage_item[ "TYPE_CLASS_DEF_ITEM" ].get_class_idx( method.get_class_idx() )
        if class_def != None:
            try:
                name = "METHOD_" + bytecode.FormatNameToPython( encoded_method.get_name() )
            except AttributeError:
                # NOTE(review): if the try block raised before `name` was
                # bound, this += raises UnboundLocalError -- confirm which
                # call is expected to raise AttributeError here.
                name += "_" + bytecode.FormatDescriptorToPython(encoded_method.get_descriptor())

            try:
                delattr(class_def, name)
            except AttributeError:
                # The old python attribute did not exist; skip re-export.
                python_export = False

            if python_export:
                name = "METHOD_" + bytecode.FormatNameToPython(value)
                setattr(class_def, name, encoded_method)

        method.reload()

    def set_hook_field_name(self, encoded_field, value):
        """Rename a field by hooking its name string and refreshing the python export."""
        python_export = True

        field = self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( encoded_field.get_field_idx() )
        self.set_hook_string( field.get_name_idx(), value )

        class_def = self.__manage_item[ "TYPE_CLASS_DEF_ITEM" ].get_class_idx( field.get_class_idx() )
        if class_def != None :
            try :
                name = "FIELD_" + bytecode.FormatNameToPython( encoded_field.get_name() )
            except AttributeError:
                # NOTE(review): same potential UnboundLocalError as in
                # set_hook_method_name above.
                name += "_" + bytecode.FormatDescriptorToPython( encoded_field.get_descriptor() )

            try:
                delattr( class_def, name )
            except AttributeError:
                python_export = False

            if python_export:
                name = "FIELD_" + bytecode.FormatNameToPython( value )
                setattr( class_def, name, encoded_field )

        field.reload()

    def set_hook_string(self, idx, value) :
        """Override the string at index *idx* with *value*."""
        self.hook_strings[ idx ] = value

    def get_next_offset_item(self, idx) :
        """Return the first indexed item offset strictly greater than *idx*
        (or *idx* itself when none exists)."""
        for i in self.__manage_item_off :
            if i > idx :
                return i
        return idx

    def get_debug_off(self, off) :
        """Parse and return the DebugInfoItem located at *off*."""
        self.buff.set_idx( off )

        return DebugInfoItem( self.buff, self )
class MapList:
    """
    Parser for the "map_list" structure of the dex format.
    """
    def __init__(self, cm, off, buff):
        self.CM = cm

        buff.set_idx(off)

        self.offset = off

        self.size = unpack("=I", buff.read(4))[0]

        self.map_item = []
        for _ in xrange(self.size):
            saved_idx = buff.get_idx()

            entry = MapItem(buff, self.CM)
            self.map_item.append(entry)

            # Parsing the payload moved the cursor; rewind to just past
            # this entry's fixed-size header for the next iteration.
            buff.set_idx(saved_idx + entry.get_length())

            payload = entry.get_item()
            if payload == None:
                # This entry describes the map list itself.
                entry.set_item(self)
                payload = entry.get_item()

            self.CM.add_type_item(TYPE_MAP_ITEM[entry.get_type()], entry, payload)

        for entry in self.map_item:
            entry.reload()

    def reload(self):
        pass

    def get_off(self):
        """Return the offset of the map list."""
        return self.offset

    def set_off(self, off):
        """Set the offset of the map list."""
        self.offset = off

    def get_item_type(self, ttype):
        """
        Get a particular item type

        :param ttype: a string which represents the desired type

        :rtype: None or the item object
        """
        for entry in self.map_item:
            if TYPE_MAP_ITEM[entry.get_type()] == ttype:
                return entry.get_item()
        return None

    def show(self):
        """
        Print the MapList object
        """
        bytecode._Print("MAP_LIST SIZE", self.size)
        for entry in self.map_item:
            # Skip the entry that points back to this map list itself.
            if entry.item != self:
                entry.show()

    def pretty_show(self):
        """
        Print with a pretty display the MapList object
        """
        bytecode._Print("MAP_LIST SIZE", self.size)
        for entry in self.map_item:
            if entry.item != self:
                entry.pretty_show()

    def get_obj(self):
        return [entry.get_obj() for entry in self.map_item]

    def get_raw(self):
        return pack("=I", self.size) + ''.join(entry.get_raw() for entry in self.map_item)

    def get_class_manager(self):
        """Return the associated ClassManager."""
        return self.CM

    def get_length(self):
        return len(self.get_raw())
class XREF:
    """Cross-reference container: an ordered list of (item, edge-data) pairs."""
    def __init__(self):
        self.items = []

    def add(self, x, y):
        """Record a cross reference to *x* carrying edge data *y*."""
        self.items.append((x, y))
class DREF:
    """Data-reference container: an ordered list of (item, access-info) pairs."""
    def __init__(self):
        self.items = []

    def add(self, x, y):
        """Record a data reference to *x* carrying access info *y*."""
        self.items.append((x, y))
class DalvikVMFormat(bytecode._Bytecode):
"""
This class can parse a classes.dex file of an Android application (APK).
:param buff: a string which represents the classes.dex file
:param decompiler: associate a decompiler object to display the java source code
:type buff: string
:type decompiler: object
:Example:
DalvikVMFormat( open("classes.dex", "rb").read() )
"""
    def __init__(self, buff, decompiler=None, config=None):
        """Parse a classes.dex buffer.

        :param buff: raw content of the classes.dex file (string)
        :param decompiler: optional decompiler object for java source display
        :param config: optional dict overriding the global CONF defaults
        """
        super(DalvikVMFormat, self).__init__(buff)

        self.config = config
        if not self.config:
            # Fall back to the global configuration defaults.
            self.config = {"RECODE_ASCII_STRING": CONF["RECODE_ASCII_STRING"],
                           "RECODE_ASCII_STRING_METH": CONF["RECODE_ASCII_STRING_METH"],
                           "LAZY_ANALYSIS": CONF["LAZY_ANALYSIS"]}

        self.CM = ClassManager(self, self.config)
        self.CM.set_decompiler(decompiler)

        self._preload(buff)
        self._load(buff)
    def _preload(self, buff):
        # Intentionally empty -- presumably a hook for subclasses to
        # pre-process the buffer before _load(); nothing to do for plain dex.
        pass
def _load(self, buff):
self.__header = HeaderItem(0, self, ClassManager(None, self.config))
if self.__header.map_off == 0:
bytecode.Warning("no map list ...")
else:
self.map_list = MapList( self.CM, self.__header.map_off, self )
self.classes = self.map_list.get_item_type( "TYPE_CLASS_DEF_ITEM" )
self.methods = self.map_list.get_item_type( "TYPE_METHOD_ID_ITEM" )
self.fields = self.map_list.get_item_type( "TYPE_FIELD_ID_ITEM" )
self.codes = self.map_list.get_item_type( "TYPE_CODE_ITEM" )
self.strings = self.map_list.get_item_type( "TYPE_STRING_DATA_ITEM" )
self.debug = self.map_list.get_item_type( "TYPE_DEBUG_INFO_ITEM" )
self.header = self.map_list.get_item_type( "TYPE_HEADER_ITEM" )
self.classes_names = None
self.__cache_methods = None
self.__cached_methods_idx = None
def get_classes_def_item(self) :
"""
This function returns the class def item
:rtype: :class:`ClassDefItem` object
"""
return self.classes
def get_methods_id_item(self) :
"""
This function returns the method id item
:rtype: :class:`MethodIdItem` object
"""
return self.methods
def get_fields_id_item(self) :
"""
This function returns the field id item
:rtype: :class:`FieldIdItem` object
"""
return self.fields
def get_codes_item(self) :
"""
This function returns the code item
:rtype: :class:`CodeItem` object
"""
return self.codes
def get_string_data_item(self) :
"""
This function returns the string data item
:rtype: :class:`StringDataItem` object
"""
return self.strings
def get_debug_info_item(self) :
"""
This function returns the debug info item
:rtype: :class:`DebugInfoItem` object
"""
return self.debug
def get_header_item(self) :
"""
This function returns the header item
:rtype: :class:`HeaderItem` object
"""
return self.header
def get_class_manager(self) :
"""
This function returns a ClassManager object which allow you to get
access to all index references (strings, methods, fields, ....)
:rtype: :class:`ClassManager` object
"""
return self.CM
def show(self) :
"""
Show the all information in the object
"""
self.map_list.show()
def pretty_show(self):
"""
Show (but pretty !) the all information in the object
"""
self.map_list.pretty_show()
    def save(self):
        """
        Return the dex (with the modifications) into raw format (fix checksums)
        (beta: do not use !)

        :rtype: string
        """
        l = []      # items in final file order
        h = {}      # item -> assigned file offset
        s = {}      # assigned offset -> serialized length
        h_r = {}    # assigned offset -> item (reverse of h)

        idx = 0
        # Pass 1: walk the map list and assign each item a file offset.
        for i in self.map_list.get_obj():
            length = 0

            if isinstance(i, list):
                for j in i:
                    # AnnotationsDirectoryItem payloads must be 4-byte aligned.
                    if isinstance(j, AnnotationsDirectoryItem) :
                        if idx % 4 != 0 :
                            idx = idx + (4 - (idx % 4))

                    l.append( j )

                    c_length = j.get_length()
                    h[ j ] = idx + length
                    h_r[ idx + length ] = j
                    s[ idx + length ] = c_length

                    length += c_length
                    #debug("SAVE" + str(j) + " @ 0x%x" % (idx+length))

                debug("SAVE " + str(i[0]) + " @0x%x (%x)" % (idx, length))

            else :
                # The map list itself must also be 4-byte aligned.
                if isinstance(i, MapList) :
                    if idx % 4 != 0 :
                        idx = idx + (4 - (idx % 4))

                l.append( i )
                h[ i ] = idx
                h_r[ idx ] = i

                length = i.get_length()

                s[idx] = length

                debug("SAVE " + str(i) + " @0x%x (%x)" % (idx, length))

            idx += length

        self.header.file_size = idx

        # Pass 2: push the assigned offsets back into each item.
        last_idx = 0
        for i in l :
            idx = h[ i ]
            i.set_off( h[ i ] )
        #      print i, hex(h[ i ])
            last_idx = idx + s[ idx ]

        # Pass 3: emit raw bytes, zero-padding any alignment gaps.
        last_idx = 0
        buff = ""
        for i in l :
            idx = h[ i ]

            if idx != last_idx :
                debug( "Adjust alignment @%x with 00 %x" % (idx, idx - last_idx) )
                buff += "\x00" * (idx - last_idx)

            buff += i.get_raw()
            last_idx = idx + s[ idx ]

        debug("GLOBAL SIZE %d" % len(buff))

        return self.fix_checksums(buff)
def fix_checksums(self, buff) :
"""
Fix a dex format buffer by setting all checksums
:rtype: string
"""
import zlib
import hashlib
signature = hashlib.sha1(buff[32:]).digest()
buff = buff[:12] + signature + buff[32:]
checksum = zlib.adler32(buff[12:])
buff = buff[:8] + pack("=i", checksum) + buff[12:]
debug("NEW SIGNATURE %s" % repr(signature))
debug("NEW CHECKSUM %x" % checksum)
return buff
def get_cm_field(self, idx) :
"""
Get a specific field by using an index
:param idx: index of the field
:type idx: int
"""
return self.CM.get_field(idx)
def get_cm_method(self, idx) :
"""
Get a specific method by using an index
:param idx: index of the method
:type idx: int
"""
return self.CM.get_method(idx)
def get_cm_string(self, idx) :
"""
Get a specific string by using an index
:param idx: index of the string
:type idx: int
"""
return self.CM.get_raw_string( idx )
def get_cm_type(self, idx) :
"""
Get a specific type by using an index
:param idx: index of the type
:type idx: int
"""
return self.CM.get_type( idx )
def get_classes_names(self) :
"""
Return the names of classes
:rtype: a list of string
"""
if self.classes_names == None :
self.classes_names = [ i.get_name() for i in self.classes.class_def ]
return self.classes_names
def get_classes(self) :
"""
Return all classes
:rtype: a list of :class:`ClassDefItem` objects
"""
return self.classes.class_def
def get_class(self, name):
"""
Return a specific class
:param name: the name of the class
:rtype: a :class:`ClassDefItem` object
"""
for i in self.classes.class_def:
if i.get_name() == name:
return i
return None
def get_method(self, name) :
"""
Return a list all methods which corresponds to the regexp
:param name: the name of the method (a python regexp)
:rtype: a list with all :class:`EncodedMethod` objects
"""
prog = re.compile(name)
l = []
for i in self.classes.class_def :
for j in i.get_methods() :
if prog.match( j.get_name() ) :
l.append( j )
return l
def get_field(self, name) :
"""
Return a list all fields which corresponds to the regexp
:param name: the name of the field (a python regexp)
:rtype: a list with all :class:`EncodedField` objects
"""
prog = re.compile(name)
l = []
for i in self.classes.class_def :
for j in i.get_fields() :
if prog.match( j.get_name() ) :
l.append( j )
return l
def get_all_fields(self) :
"""
Return a list of field items
:rtype: a list of :class:`FieldIdItem` objects
"""
try :
return self.fields.gets()
except AttributeError :
return []
def get_fields(self) :
"""
Return all field objects
:rtype: a list of :class:`EncodedField` objects
"""
l = []
for i in self.classes.class_def :
for j in i.get_fields() :
l.append( j )
return l
def get_methods(self) :
"""
Return all method objects
:rtype: a list of :class:`EncodedMethod` objects
"""
l = []
for i in self.classes.class_def :
for j in i.get_methods() :
l.append( j )
return l
def get_len_methods(self) :
"""
Return the number of methods
:rtype: int
"""
return len( self.get_methods() )
def get_method_by_idx(self, idx) :
"""
Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object
"""
if self.__cached_methods_idx == None :
self.__cached_methods_idx = {}
for i in self.classes.class_def :
for j in i.get_methods() :
self.__cached_methods_idx[ j.get_method_idx() ] = j
try :
return self.__cached_methods_idx[ idx ]
except KeyError :
return None
def get_method_descriptor(self, class_name, method_name, descriptor) :
"""
Return the specific method
:param class_name: the class name of the method
:type class_name: string
:param method_name: the name of the method
:type method_name: string
:param descriptor: the descriptor of the method
:type descriptor: string
:rtype: None or a :class:`EncodedMethod` object
"""
key = class_name + method_name + descriptor
if self.__cache_methods == None :
self.__cache_methods = {}
for i in self.classes.class_def :
for j in i.get_methods() :
self.__cache_methods[ j.get_class_name() + j.get_name() + j.get_descriptor() ] = j
try :
return self.__cache_methods[ key ]
except KeyError :
return None
def get_methods_descriptor(self, class_name, method_name):
"""
Return the specific methods of the class
:param class_name: the class name of the method
:type class_name: string
:param method_name: the name of the method
:type method_name: string
:rtype: None or a :class:`EncodedMethod` object
"""
l = []
for i in self.classes.class_def:
if i.get_name() == class_name:
for j in i.get_methods():
if j.get_name() == method_name:
l.append(j)
return l
def get_methods_class(self, class_name) :
"""
Return all methods of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedMethod` objects
"""
l = []
for i in self.classes.class_def :
for j in i.get_methods() :
if class_name == j.get_class_name() :
l.append( j )
return l
def get_fields_class(self, class_name) :
"""
Return all fields of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedField` objects
"""
l = []
for i in self.classes.class_def :
for j in i.get_fields() :
if class_name == j.get_class_name() :
l.append( j )
return l
def get_field_descriptor(self, class_name, field_name, descriptor) :
"""
Return the specific field
:param class_name: the class name of the field
:type class_name: string
:param field_name: the name of the field
:type field_name: string
:param descriptor: the descriptor of the field
:type descriptor: string
:rtype: None or a :class:`EncodedField` object
"""
for i in self.classes.class_def :
if class_name == i.get_name() :
for j in i.get_fields() :
if field_name == j.get_name() and descriptor == j.get_descriptor() :
return j
return None
def get_strings(self) :
"""
Return all strings
:rtype: a list with all strings used in the format (types, names ...)
"""
return [i.get() for i in self.strings]
def get_regex_strings(self, regular_expressions) :
"""
Return all target strings matched the regex
:param regular_expressions: the python regex
:type regular_expressions: string
:rtype: a list of strings matching the regex expression
"""
str_list = []
if regular_expressions.count is None :
return None
for i in self.get_strings() :
if re.match(regular_expressions, i) :
str_list.append(i)
return str_list
def get_format_type(self):
"""
Return the type
:rtype: a string
"""
return "DEX"
    def create_xref(self, python_export=True):
        """
        Create XREF for this object

        :param python_export (boolean): export xref in each method
        """
        gvm = self.CM.get_gvmanalysis()

        for _class in self.get_classes():
            # Class-level XREFs: callers that reference this class
            # (successors of the class node in the GI graph).
            key = _class.get_name()
            if key in gvm.nodes:
                _class.XREFfrom = XREF()
                for i in gvm.GI.successors(gvm.nodes[key].id):
                    xref = gvm.nodes_id[i]
                    xref_meth = self.get_method_descriptor(xref.class_name, xref.method_name, xref.descriptor)
                    # NOTE(review): unlike the method-level loops below,
                    # xref_meth is not checked for None here -- confirm it
                    # can never be missing for class references.
                    if python_export == True:
                        name = bytecode.FormatClassToPython(xref_meth.get_class_name()) + "__" + \
                            bytecode.FormatNameToPython(xref_meth.get_name()) + "__" + \
                            bytecode.FormatDescriptorToPython(xref_meth.get_descriptor())
                        setattr(_class.XREFfrom, name, xref_meth)
                    _class.XREFfrom.add(xref_meth, xref.edges[gvm.nodes[key]])

            for method in _class.get_methods():
                method.XREFfrom = XREF()
                method.XREFto = XREF()

                key = "%s %s %s" % (method.get_class_name(), method.get_name(), method.get_descriptor())

                if key in gvm.nodes:
                    # Incoming references: methods that call this one.
                    for i in gvm.G.predecessors(gvm.nodes[key].id):
                        xref = gvm.nodes_id[i]
                        xref_meth = self.get_method_descriptor(xref.class_name, xref.method_name, xref.descriptor)
                        if xref_meth != None:
                            name = bytecode.FormatClassToPython(xref_meth.get_class_name()) + "__" + \
                                bytecode.FormatNameToPython(xref_meth.get_name()) + "__" + \
                                bytecode.FormatDescriptorToPython(xref_meth.get_descriptor())
                            if python_export == True:
                                setattr(method.XREFfrom, name, xref_meth)
                            method.XREFfrom.add(xref_meth, xref.edges[gvm.nodes[key]])

                    # Outgoing references: methods this one calls.
                    for i in gvm.G.successors(gvm.nodes[key].id):
                        xref = gvm.nodes_id[i]
                        xref_meth = self.get_method_descriptor(xref.class_name, xref.method_name, xref.descriptor)
                        if xref_meth != None:
                            name = bytecode.FormatClassToPython(xref_meth.get_class_name()) + "__" + \
                                bytecode.FormatNameToPython(xref_meth.get_name()) + "__" + \
                                bytecode.FormatDescriptorToPython(xref_meth.get_descriptor())
                            if python_export == True:
                                setattr(method.XREFto, name, xref_meth)
                            method.XREFto.add(xref_meth, gvm.nodes[key].edges[xref])
    def create_dref(self, python_export=True):
        """
        Create DREF for this object

        :param python_export (boolean): export dref in each field
        """
        vmx = self.CM.get_vmanalysis()

        for _class in self.get_classes() :
            for field in _class.get_fields() :
                field.DREFr = DREF()
                field.DREFw = DREF()

                paths = vmx.tainted_variables.get_field( field.get_class_name(), field.get_name(), field.get_descriptor() )

                if paths != None :
                    # Group accesses by reading ('R') vs writing ('W')
                    # method, collecting all instruction indices per method.
                    access = {}
                    access["R"] = {}
                    access["W"] = {}

                    for path in paths.get_paths() :
                        access_val, idx = path[0]
                        m_idx = path[1]

                        if access_val == 'R' :
                            dref_meth = self.get_method_by_idx( m_idx )
                            name = bytecode.FormatClassToPython( dref_meth.get_class_name() ) + "__" + \
                                bytecode.FormatNameToPython( dref_meth.get_name() ) + "__" + \
                                bytecode.FormatDescriptorToPython( dref_meth.get_descriptor() )
                            if python_export == True :
                                setattr( field.DREFr, name, dref_meth )

                            try :
                                access["R"][ dref_meth ].append( idx )
                            except KeyError :
                                access["R"][ dref_meth ] = []
                                access["R"][ dref_meth ].append( idx )

                        else :
                            dref_meth = self.get_method_by_idx( m_idx )
                            name = bytecode.FormatClassToPython( dref_meth.get_class_name() ) + "__" + \
                                bytecode.FormatNameToPython( dref_meth.get_name() ) + "__" + \
                                bytecode.FormatDescriptorToPython( dref_meth.get_descriptor() )
                            if python_export == True :
                                setattr( field.DREFw, name, dref_meth )

                            try :
                                access["W"][ dref_meth ].append( idx )
                            except KeyError :
                                access["W"][ dref_meth ] = []
                                access["W"][ dref_meth ].append( idx )

                    # Record one DREF entry per accessing method, carrying
                    # the list of instruction indices.
                    for i in access["R"] :
                        field.DREFr.add( i, access["R"][i] )
                    for i in access["W"] :
                        field.DREFw.add( i, access["W"][i] )
def create_python_export(self) :
"""
Export classes/methods/fields' names in the python namespace
"""
for _class in self.get_classes() :
self._create_python_export_class(_class)
    def _delete_python_export_class(self, _class) :
        # Remove the python attribute previously exported for this class.
        self._create_python_export_class( _class, True)
    def _create_python_export_class(self, _class, delete=False) :
        """Export (or, with delete=True, remove) python attributes for a class.

        Creates ``CLASS_<name>`` on self, and ``METHOD_<name>`` /
        ``FIELD_<name>`` attributes on the class object; ambiguous
        (overloaded) names get the descriptor appended to disambiguate.
        """
        if _class != None :
            ### Class
            name = "CLASS_" + bytecode.FormatClassToPython( _class.get_name() )
            if delete :
                # NOTE(review): only the CLASS_ attribute on self is removed;
                # METHOD_/FIELD_ attributes set on the class object remain --
                # confirm this is intended.
                delattr( self, name )
                return
            else :
                setattr( self, name, _class )

            ### Methods
            # Group methods by name to detect overloads.
            m = {}
            for method in _class.get_methods() :
                if method.get_name() not in m :
                    m[ method.get_name() ] = []
                m[ method.get_name() ].append( method )

            for i in m :
                if len(m[i]) == 1 :
                    j = m[i][0]
                    name = "METHOD_" + bytecode.FormatNameToPython( j.get_name() )
                    setattr( _class, name, j )
                else :
                    # Overloaded name: append the descriptor to disambiguate.
                    for j in m[i] :
                        name = "METHOD_" + bytecode.FormatNameToPython( j.get_name() ) + "_" + bytecode.FormatDescriptorToPython( j.get_descriptor() )
                        setattr( _class, name, j )

            ### Fields
            # Same grouping scheme as for methods.
            f = {}
            for field in _class.get_fields() :
                if field.get_name() not in f :
                    f[ field.get_name() ] = []
                f[ field.get_name() ].append( field )

            for i in f :
                if len(f[i]) == 1 :
                    j = f[i][0]
                    name = "FIELD_" + bytecode.FormatNameToPython( j.get_name() )
                    setattr( _class, name, j )
                else :
                    for j in f[i] :
                        name = "FIELD_" + bytecode.FormatNameToPython( j.get_name() ) + "_" + bytecode.FormatDescriptorToPython( j.get_descriptor() )
                        setattr( _class, name, j )
def get_BRANCH_DVM_OPCODES(self) :
return BRANCH_DVM_OPCODES
def get_determineNext(self) :
return determineNext
def get_determineException(self) :
return determineException
def get_DVM_TOSTRING(self):
return DVM_TOSTRING()
def set_decompiler(self, decompiler):
self.CM.set_decompiler(decompiler)
def set_vmanalysis(self, vmanalysis):
self.CM.set_vmanalysis(vmanalysis)
def set_gvmanalysis(self, gvmanalysis):
self.CM.set_gvmanalysis(gvmanalysis)
    def disassemble(self, offset, size):
        """Yield the instructions decoded from a region of the dex buffer.

        :param offset: offset to disassemble in the file (from the beginning of the file)
        :type offset: int
        :param size: number of bytes to disassemble
        :type size: int
        """
        for i in DCode(self.CM, offset, size, self.get_buff()[offset:offset + size]).get_instructions():
            yield i
    def _get_class_hierarchy(self):
        """Build and return the class hierarchy as a tree of bytecode.Node
        objects rooted at a synthetic "Root" node (id 0)."""
        ids = {}        # class/superclass name -> numeric id (>= 1)
        present = {}    # ids that correspond to classes defined in this dex
        r_ids = {}      # numeric id -> name (reverse of ids)
        to_add = {}     # superclasses not defined here, attached to Root
        els = []        # [class id, superclass id, class name] triples

        for current_class in self.get_classes():
            # Names are stored as "Lfoo/Bar;" -- strip the L and ;.
            s_name = current_class.get_superclassname()[1:-1]
            c_name = current_class.get_name()[1:-1]

            if s_name not in ids:
                ids[s_name] = len(ids) + 1
                r_ids[ids[s_name]] = s_name

            if c_name not in ids:
                ids[c_name] = len(ids) + 1

            els.append([ids[c_name], ids[s_name], c_name])
            present[ids[c_name]] = True

        # Superclasses that are referenced but not defined in this dex
        # (e.g. framework classes) become children of the Root node.
        for i in els:
            if i[1] not in present:
                to_add[i[1]] = r_ids[i[1]]

        for i in to_add:
            els.append([i, 0, to_add[i]])

        treeMap = {}
        Root = bytecode.Node(0, "Root")
        treeMap[Root.id] = Root
        for element in els:
            nodeId, parentId, title = element

            if not nodeId in treeMap:
                treeMap[nodeId] = bytecode.Node(nodeId, title)
            else:
                # Node was created earlier as a placeholder parent;
                # fill in its real id and title now.
                treeMap[nodeId].id = nodeId
                treeMap[nodeId].title = title

            if not parentId in treeMap:
                # Placeholder parent; patched when its own triple is seen.
                treeMap[parentId] = bytecode.Node(0, '')

            treeMap[parentId].children.append(treeMap[nodeId])

        return Root
def print_classes_hierarchy(self):
def print_map(node, l, lvl=0):
for n in node.children:
if lvl == 0:
l.append("%s" % (n.title))
else:
l.append("%s %s" % ('\t' * lvl, n.title))
if len(n.children) > 0:
print_map(n, l, lvl + 1)
l = []
print_map(self._get_class_hierarchy(), l)
return l
def list_classes_hierarchy(self):
def print_map(node, l):
if node.title not in l:
l[node.title] = []
for n in node.children:
if len(n.children) > 0:
w = {}
w[n.title] = []
l[node.title].append(w)
print_map(n, w)
else:
l[node.title].append(n.title)
l = {}
print_map(self._get_class_hierarchy(), l)
return l
def get_format(self):
objs = self.map_list.get_obj()
h = {}
index = {}
self._get_objs(h, index, objs)
return h, index
def _get_objs(self, h, index, objs):
    """
    Recursively collect format objects.

    :param h: nested dict, each object maps to a dict of its sub-objects
    :param index: flat dict mapping each object to its file offset
    :param objs: the (possibly nested) list of objects to walk
    """
    for i in objs:
        if isinstance(i, list):
            self._get_objs(h, index, i)
        else:
            try:
                # idiom fix: identity test instead of != None
                if i is not None:
                    h[i] = {}
                    index[i] = i.offset
            except AttributeError:
                # object has no offset — skip indexing it
                pass

            try:
                # MapList is the root container: do not recurse back into it
                if not isinstance(i, MapList):
                    next_objs = i.get_obj()
                    if isinstance(next_objs, list):
                        self._get_objs(h[i], index, next_objs)
            except AttributeError:
                # object has no get_obj() — it is a leaf
                pass
def colorize_operands(self, operands, colors):
    """
    Yield a color-escaped printable string for each operand.

    :param operands: list of operand tuples ``(type, value[, resolved])``
    :param colors: mapping of color names ("registers", "literal",
                   "raw", "offset", "string", "meth", "field", "type",
                   "normal") to escape-sequence strings
    :rtype: generator of strings
    """
    for operand in operands:
        if operand[0] == OPERAND_REGISTER:
            yield "%sv%d%s" % (colors["registers"], operand[1], colors["normal"])
        elif operand[0] == OPERAND_LITERAL:
            yield "%s%d%s" % (colors["literal"], operand[1], colors["normal"])
        elif operand[0] == OPERAND_RAW:
            yield "%s%s%s" % (colors["raw"], operand[1], colors["normal"])
        elif operand[0] == OPERAND_OFFSET:
            yield "%s%d%s" % (colors["offset"], operand[1], colors["normal"])
        elif operand[0] & OPERAND_KIND:
            if operand[0] == (OPERAND_KIND + KIND_STRING):
                yield "%s%s%s" % (colors["string"], operand[2], colors["normal"])
            elif operand[0] == (OPERAND_KIND + KIND_METH):
                yield "%s%s%s" % (colors["meth"], operand[2], colors["normal"])
            elif operand[0] == (OPERAND_KIND + KIND_FIELD):
                yield "%s%s%s" % (colors["field"], operand[2], colors["normal"])
            elif operand[0] == (OPERAND_KIND + KIND_TYPE):
                yield "%s%s%s" % (colors["type"], operand[2], colors["normal"])
            else:
                # BUGFIX: was repr(operands[2]) — indexed the whole operand
                # list instead of the current operand tuple
                yield "%s" % repr(operand[2])
        else:
            # BUGFIX: was repr(operands[1]) — same list/tuple confusion
            yield "%s" % repr(operand[1])
def get_operand_html(self, operand, registers_colors, colors, escape_fct, wrap_fct):
    """
    Render a single operand as an HTML <FONT> fragment.

    :param operand: operand tuple ``(type, value[, resolved])``
    :param registers_colors: mapping register number -> color string
    :param colors: mapping of color names to color strings
    :param escape_fct: callable used to HTML-escape text
    :param wrap_fct: callable used to wrap long text into chunks
    :rtype: string
    """
    kind = operand[0]

    if kind == OPERAND_REGISTER:
        return '<FONT color="%s">v%s</FONT>' % (registers_colors[operand[1]], operand[1])

    if kind == OPERAND_LITERAL:
        return '<FONT color="%s">0x%x</FONT>' % (colors["literal"], operand[1])

    if kind == OPERAND_RAW:
        # long raw data is wrapped onto several <br/>-separated lines
        if len(operand[1]) > 32:
            chunks = wrap_fct(operand[1], 32)
            body = "<br/>" + "<br/>".join(escape_fct(repr(chunk)[1:-1]) for chunk in chunks)
        else:
            body = escape_fct(repr(operand[1])[1:-1])
        return '<FONT color="%s">%s</FONT>' % (colors["raw"], body)

    if kind == OPERAND_OFFSET:
        return '<FONT FACE="Times-Italic" color="%s">0x%x</FONT>' % (colors["offset"], operand[1])

    if kind & OPERAND_KIND:
        if kind == (OPERAND_KIND + KIND_STRING):
            # long strings are wrapped like raw data
            if len(operand[2]) > 32:
                body = "<br/>" + "<br/>".join(escape_fct(part) for part in wrap_fct(operand[2], 32))
            else:
                body = escape_fct(operand[2])
            return '<FONT color="%s">%s</FONT>' % (colors["string"], body)
        if kind == (OPERAND_KIND + KIND_METH):
            return '<FONT color="%s">%s</FONT>' % (colors["method"], escape_fct(operand[2]))
        if kind == (OPERAND_KIND + KIND_FIELD):
            return '<FONT color="%s">%s</FONT>' % (colors["field"], escape_fct(operand[2]))
        if kind == (OPERAND_KIND + KIND_TYPE):
            return '<FONT color="%s">%s</FONT>' % (colors["type"], escape_fct(operand[2]))
        return escape_fct(str(operand[2]))

    return escape_fct(str(operand[1]))
class OdexHeaderItem:
    """
    This class can parse the odex header.

    :param buff: a Buff-like object (needs ``set_idx``/``read``)
                 positioned on the odex file
    """

    def __init__(self, buff):
        # Skip the 8-byte magic; the header is eight little-endian
        # 32-bit unsigned integers.
        buff.set_idx(8)

        self.dex_offset = unpack("=I", buff.read(4))[0]   # offset of the embedded dex
        self.dex_length = unpack("=I", buff.read(4))[0]   # length of the embedded dex
        self.deps_offset = unpack("=I", buff.read(4))[0]  # offset of the dependency table
        self.deps_length = unpack("=I", buff.read(4))[0]  # length of the dependency table
        self.aux_offset = unpack("=I", buff.read(4))[0]   # offset of the auxiliary section
        self.aux_length = unpack("=I", buff.read(4))[0]   # length of the auxiliary section
        self.flags = unpack("=I", buff.read(4))[0]
        self.padding = unpack("=I", buff.read(4))[0]

    def show(self):
        """Print a one-line summary of the header fields."""
        # print-as-function over a single expression: identical output on
        # Python 2, and valid syntax on Python 3
        print("dex_offset:%x dex_length:%x deps_offset:%x deps_length:%x aux_offset:%x aux_length:%x flags:%x" % (self.dex_offset,
                                                                                                                 self.dex_length,
                                                                                                                 self.deps_offset,
                                                                                                                 self.deps_length,
                                                                                                                 self.aux_offset,
                                                                                                                 self.aux_length,
                                                                                                                 self.flags))

    def get_raw(self):
        """Re-serialize the eight header fields (without the magic)."""
        return pack("=I", self.dex_offset) + \
               pack("=I", self.dex_length) + \
               pack("=I", self.deps_offset) + \
               pack("=I", self.deps_length) + \
               pack("=I", self.aux_offset) + \
               pack("=I", self.aux_length) + \
               pack("=I", self.flags) + \
               pack("=I", self.padding)
class OdexDependencies:
    """
    This class can parse the odex dependencies.

    :param buff: a Buff-like object (needs ``read``) positioned at the
                 start of the dependency section
    """

    def __init__(self, buff):
        self.modification_time = unpack("=I", buff.read(4))[0]
        self.crc = unpack("=I", buff.read(4))[0]
        self.dalvik_build = unpack("=I", buff.read(4))[0]
        self.dependency_count = unpack("=I", buff.read(4))[0]

        self.dependencies = []           # dependency file names
        self.dependency_checksums = []   # one 20-byte checksum per dependency

        for i in range(0, self.dependency_count):
            string_length = unpack("=I", buff.read(4))[0]
            name_dependency = buff.read(string_length)
            self.dependencies.append(name_dependency)
            self.dependency_checksums.append(buff.read(20))

    def get_dependencies(self):
        """
        Return the list of dependencies.

        :rtype: a list of strings
        """
        return self.dependencies

    def get_raw(self):
        """Re-serialize the dependency section exactly as it was read."""
        # compat fix: start from a bytes literal (b"" == "" on Python 2)
        # so concatenating pack() results also works on Python 3
        dependencies = b""

        for idx, value in enumerate(self.dependencies):
            dependencies += pack("=I", len(value)) + \
                            pack("=%ds" % len(value), value) + \
                            pack("=20s", self.dependency_checksums[idx])

        return pack("=I", self.modification_time) + \
               pack("=I", self.crc) + \
               pack("=I", self.dalvik_build) + \
               pack("=I", self.dependency_count) + \
               dependencies
class DalvikOdexVMFormat(DalvikVMFormat):
    """
    This class can parse an odex file

    :param buff: a string which represents the odex file
    :param decompiler: associate a decompiler object to display the java source code

    :type buff: string
    :type decompiler: object

    :Example:
      DalvikOdexVMFormat( open("classes.odex", "rb").read() )
    """

    def _preload(self, buff):
        # Keep the untouched input and its magic; the embedded
        # classes.dex payload is only extracted for known odex magics.
        self.orig_buff = buff
        self.magic = buff[:8]
        if self.magic in (ODEX_FILE_MAGIC_35, ODEX_FILE_MAGIC_36):
            self.odex_header = OdexHeaderItem(self)

            self.set_idx(self.odex_header.deps_offset)
            self.dependencies = OdexDependencies(self)

            # everything after the dependency section is kept verbatim
            self.padding = buff[self.odex_header.deps_offset + self.odex_header.deps_length:]

            # narrow the working buffer down to the embedded dex payload
            self.set_idx(self.odex_header.dex_offset)
            self.set_buff(self.read(self.odex_header.dex_length))
            self.set_idx(0)

    def save(self):
        """
        Do not use !
        """
        dex_raw = super(DalvikOdexVMFormat, self).save()
        return self.magic + self.odex_header.get_raw() + dex_raw + self.dependencies.get_raw() + self.padding

    def get_buff(self):
        # reassemble the full odex image: magic, header, dex, deps, padding
        dex_buff = super(DalvikOdexVMFormat, self).get_buff()
        return self.magic + self.odex_header.get_raw() + dex_buff + self.dependencies.get_raw() + self.padding

    def get_dependencies(self):
        """
        Return the odex dependencies object

        :rtype: an OdexDependencies object
        """
        return self.dependencies

    def get_format_type(self):
        """
        Return the type

        :rtype: a string
        """
        return "ODEX"
def get_params_info(nb, proto):
    """
    Build a commented header describing a method's register layout.

    :param nb: total number of registers of the method
    :type nb: int
    :param proto: the method descriptor, e.g. ``(I Ljava/lang/String;)V``
    :type proto: string
    :rtype: string ("# ..." commented lines)
    """
    i_buffer = "# Parameters:\n"

    ret = proto.split(')')
    params = ret[0][1:].split()
    if params:
        # the first registers are locals, the last len(params) hold the parameters
        i_buffer += "# - local registers: v%d...v%d\n" % (0, nb - len(params) - 1)
        # enumerate replaces the original xrange + manual j counter
        for j, param in enumerate(params):
            i_buffer += "# - v%d:%s\n" % (nb - len(params) + j, get_type(param))
    else:
        i_buffer += "# local registers: v%d...v%d\n" % (0, nb - 1)

    i_buffer += "#\n# - return:%s\n\n" % get_type(ret[1])

    return i_buffer
def get_bytecodes_method(dex_object, ana_object, method):
    """
    Return the textual disassembly of one method.

    :param dex_object: the DalvikVMFormat object (unused, kept for API compatibility)
    :param ana_object: the analysis object used to look up the method
    :param method: the EncodedMethod to disassemble
    :rtype: string
    """
    method_analysis = ana_object.get_method(method)
    return get_bytecodes_methodx(method, method_analysis)
def get_bytecodes_methodx(method, mx):
    """
    Build a human-readable disassembly listing for one method.

    :param method: an EncodedMethod object
    :param mx: the matching method analysis object (provides basic blocks)
    :rtype: string
    """
    basic_blocks = mx.basic_blocks.gets()
    i_buffer = ""

    idx = 0   # byte offset of the current instruction within the method
    nb = 0    # running instruction counter across all basic blocks

    # method header: class->name(descriptor) [access_flags=...]
    i_buffer += "# %s->%s%s [access_flags=%s]\n#\n" % (method.get_class_name(), method.get_name(), method.get_descriptor(), method.get_access_flags_string())
    if method.code != None:
        # describe the register layout before the actual bytecode
        i_buffer += get_params_info(method.code.get_registers_size(), method.get_descriptor())

    for i in basic_blocks:
        bb_buffer = ""
        ins_buffer = ""

        bb_buffer += "%s : " % (i.name)

        instructions = i.get_instructions()
        for ins in instructions:
            ins_buffer += "\t%-8d(%08x) " % (nb, idx)
            ins_buffer += "%-20s %s" % (ins.get_name(), ins.get_output(idx))

            op_value = ins.get_op_value()
            # on the block's last instruction, append the successor block names
            if ins == instructions[-1] and i.childs != []:
                # packed/sparse-switch
                if (op_value == 0x2b or op_value == 0x2c) and len(i.childs) > 1:
                    # first child is the default branch, the rest map case values to targets
                    values = i.get_special_ins(idx).get_values()
                    bb_buffer += "[ D:%s " % (i.childs[0][2].name)
                    bb_buffer += ' '.join("%d:%s" % (values[j], i.childs[j + 1][2].name) for j in range(0, len(i.childs) - 1)) + " ]"
                else:
                    #if len(i.childs) == 2:
                    #    i_buffer += "%s[ %s%s " % (branch_false_color, i.childs[0][2].name, branch_true_color))
                    #    print_fct(' '.join("%s" % c[2].name for c in i.childs[1:]) + " ]%s" % normal_color)
                    #else :
                    bb_buffer += "[ " + ' '.join("%s" % c[2].name for c in i.childs) + " ]"

            idx += ins.get_length()
            nb += 1

            ins_buffer += "\n"

        # append exception-handler info when this block has any
        if i.get_exception_analysis() != None:
            ins_buffer += "\t%s\n" % (i.exception_analysis.show_buff())

        i_buffer += bb_buffer + "\n" + ins_buffer + "\n"

    return i_buffer
def auto(filename, raw=None):
    """
    Create the appropriate parser (DEX or ODEX) for a file.

    :param filename: path of the file to load (ignored when raw is given)
    :type filename: string
    :param raw: the file content, if it has already been read
    :type raw: string

    :rtype: a DalvikVMFormat or DalvikOdexVMFormat object, or None when
            the content is neither a DEX nor an ODEX file
    """
    data_raw = raw
    if raw is None:
        # use a context manager so the file handle is closed
        # (the original open(...).read() leaked it)
        with open(filename, "rb") as fd:
            data_raw = fd.read()

    ret_type = is_android_raw(data_raw[:10])
    if ret_type == "DEX":
        return DalvikVMFormat(data_raw)
    elif ret_type == "ODEX":
        return DalvikOdexVMFormat(data_raw)
    return None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.