repo_name | ref | path | copies | content
|---|---|---|---|---|
jotes/pontoon | refs/heads/master | pontoon/localizations/urls.py | 3 | from django.urls import include, path, re_path
from pontoon.projects import views as projects_views
from pontoon.teams import views as teams_views
from . import views
urlpatterns = [
path(
"<locale:code>/<slug:slug>/",
include(
[
# Localization page
path(
"", views.localization, name="pontoon.localizations.localization",
),
# Localization tags
path("tags/", views.localization, name="pontoon.localizations.tags",),
# Localization contributors
path(
"contributors/",
views.localization,
name="pontoon.localizations.contributors",
),
# Project info
path(
"project-info/",
views.localization,
name="pontoon.localizations.project-info",
),
# Team info
path(
"team-info/",
views.localization,
name="pontoon.localizations.team-info",
),
# AJAX views
path(
"ajax/",
include(
[
# Localization resources
path(
"",
views.ajax_resources,
name="pontoon.localizations.ajax.resources",
),
# Localization tags
path(
"tags/",
views.ajax_tags,
name="pontoon.localizations.ajax.tags",
),
# Localization contributors
path(
"contributors/",
views.LocalizationContributorsView.as_view(),
name="pontoon.localizations.ajax.contributors",
),
]
),
),
]
),
),
# AJAX view: Project info
re_path(
r"^[A-Za-z0-9\-\@\.]+/(?P<slug>[\w-]+)/ajax/project-info/$",
projects_views.ajax_info,
name="pontoon.localizations.ajax.project-info",
),
# AJAX view: Team info
re_path(
r"^(?P<locale>[A-Za-z0-9\-\@\.]+)/[\w-]+/ajax/team-info/$",
teams_views.ajax_info,
name="pontoon.localizations.ajax.team-info",
),
]
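# Note: the "<locale:code>" captures above assume a custom "locale" path
# converter registered elsewhere in the project. A minimal sketch of such a
# converter (hypothetical, mirroring the regex of the re_path entries above):
#
#     from django.urls import register_converter
#
#     class LocaleConverter:
#         regex = r"[A-Za-z0-9\-\@\.]+"
#
#         def to_python(self, value):
#             return value
#
#         def to_url(self, value):
#             return value
#
#     register_converter(LocaleConverter, "locale")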
|
salamer/NotesOfDjangoBook | refs/heads/master | mysite/mysite/foobar_view.py | 2 | from django.shortcuts import render
from books.models import Book
def foo_view(request):
    books = Book.objects.filter(title__icontains='world')
    return render(request, 'foobar/foo.html', {'books': books})
def bar_view(request):
    books = Book.objects.filter(title__icontains='the')
    return render(request, 'foobar/bar.html', {'books': books})
def foo_bar_view(request, template_name, search_str):
    books = Book.objects.filter(title__icontains=search_str)
    return render(request, template_name, {'books': books})
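# Hypothetical URLconf wiring for the generic view above (a sketch, not part
# of this module): the extra dictionary is passed to the view as keyword
# arguments, so both hard-coded views can be replaced by foo_bar_view.
#
#     from django.conf.urls import url
#
#     urlpatterns = [
#         url(r'^foo/$', foo_bar_view,
#             {'template_name': 'foobar/foo.html', 'search_str': 'world'}),
#         url(r'^bar/$', foo_bar_view,
#             {'template_name': 'foobar/bar.html', 'search_str': 'the'}),
#     ]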
|
MTASZTAKI/ApertusVR | refs/heads/0.9 | plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/agents/__init__.py | 386048 | |
rrrene/django | refs/heads/master | tests/template_tests/filter_tests/test_timesince.py | 161 | from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.template.defaultfilters import timesince_filter
from django.test import SimpleTestCase
from django.test.utils import requires_tz_support
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimesinceTests(TimezoneTestCase):
"""
#20246 - \xa0 in output avoids line-breaks between value and unit
"""
# Default compare with datetime.now()
@setup({'timesince01': '{{ a|timesince }}'})
def test_timesince01(self):
output = self.engine.render_to_string(
'timesince01', {'a': datetime.now() + timedelta(minutes=-1, seconds=-10)}
)
self.assertEqual(output, '1\xa0minute')
@setup({'timesince02': '{{ a|timesince }}'})
def test_timesince02(self):
output = self.engine.render_to_string(
'timesince02', {'a': datetime.now() - timedelta(days=1, minutes=1)}
)
self.assertEqual(output, '1\xa0day')
@setup({'timesince03': '{{ a|timesince }}'})
def test_timesince03(self):
output = self.engine.render_to_string(
'timesince03', {'a': datetime.now() - timedelta(hours=1, minutes=25, seconds=10)}
)
self.assertEqual(output, '1\xa0hour, 25\xa0minutes')
# Compare to a given parameter
@setup({'timesince04': '{{ a|timesince:b }}'})
def test_timesince04(self):
output = self.engine.render_to_string(
'timesince04',
{'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=1)},
)
self.assertEqual(output, '1\xa0day')
@setup({'timesince05': '{{ a|timesince:b }}'})
def test_timesince05(self):
output = self.engine.render_to_string(
'timesince05',
{'a': self.now - timedelta(days=2, minutes=1), 'b': self.now - timedelta(days=2)},
)
self.assertEqual(output, '1\xa0minute')
# Check that timezone is respected
@setup({'timesince06': '{{ a|timesince:b }}'})
def test_timesince06(self):
output = self.engine.render_to_string('timesince06', {'a': self.now_tz - timedelta(hours=8), 'b': self.now_tz})
self.assertEqual(output, '8\xa0hours')
# Tests for #7443
@setup({'timesince07': '{{ earlier|timesince }}'})
def test_timesince07(self):
output = self.engine.render_to_string('timesince07', {'earlier': self.now - timedelta(days=7)})
self.assertEqual(output, '1\xa0week')
@setup({'timesince08': '{{ earlier|timesince:now }}'})
def test_timesince08(self):
output = self.engine.render_to_string(
'timesince08', {'now': self.now, 'earlier': self.now - timedelta(days=7)}
)
self.assertEqual(output, '1\xa0week')
@setup({'timesince09': '{{ later|timesince }}'})
def test_timesince09(self):
output = self.engine.render_to_string('timesince09', {'later': self.now + timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
@setup({'timesince10': '{{ later|timesince:now }}'})
def test_timesince10(self):
output = self.engine.render_to_string('timesince10', {'now': self.now, 'later': self.now + timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
# Ensures that differing timezones are calculated correctly.
@setup({'timesince11': '{{ a|timesince }}'})
def test_timesince11(self):
output = self.engine.render_to_string('timesince11', {'a': self.now})
self.assertEqual(output, '0\xa0minutes')
@requires_tz_support
@setup({'timesince12': '{{ a|timesince }}'})
def test_timesince12(self):
output = self.engine.render_to_string('timesince12', {'a': self.now_tz})
self.assertEqual(output, '0\xa0minutes')
@requires_tz_support
@setup({'timesince13': '{{ a|timesince }}'})
def test_timesince13(self):
output = self.engine.render_to_string('timesince13', {'a': self.now_tz_i})
self.assertEqual(output, '0\xa0minutes')
@setup({'timesince14': '{{ a|timesince:b }}'})
def test_timesince14(self):
output = self.engine.render_to_string('timesince14', {'a': self.now_tz, 'b': self.now_tz_i})
self.assertEqual(output, '0\xa0minutes')
@setup({'timesince15': '{{ a|timesince:b }}'})
def test_timesince15(self):
output = self.engine.render_to_string('timesince15', {'a': self.now, 'b': self.now_tz_i})
self.assertEqual(output, '')
@setup({'timesince16': '{{ a|timesince:b }}'})
def test_timesince16(self):
output = self.engine.render_to_string('timesince16', {'a': self.now_tz_i, 'b': self.now})
self.assertEqual(output, '')
# Tests for #9065 (two date objects).
@setup({'timesince17': '{{ a|timesince:b }}'})
def test_timesince17(self):
output = self.engine.render_to_string('timesince17', {'a': self.today, 'b': self.today})
self.assertEqual(output, '0\xa0minutes')
@setup({'timesince18': '{{ a|timesince:b }}'})
def test_timesince18(self):
output = self.engine.render_to_string('timesince18', {'a': self.today, 'b': self.today + timedelta(hours=24)})
self.assertEqual(output, '1\xa0day')
class FunctionTests(SimpleTestCase):
def test_since_now(self):
self.assertEqual(timesince_filter(datetime.now() - timedelta(1)), '1\xa0day')
def test_explicit_date(self):
self.assertEqual(timesince_filter(datetime(2005, 12, 29), datetime(2005, 12, 30)), '1\xa0day')
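# For reference, the filter wraps django.utils.timesince.timesince, which
# joins value and unit with a no-break space ('\xa0') so browsers never
# wrap between them (a sketch of direct use, mirroring the tests above):
#
#     from django.utils.timesince import timesince
#     timesince(datetime.now() - timedelta(days=1))  # -> '1\xa0day'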
|
frouty/odoo_oph | refs/heads/dev_70 | addons/account/wizard/account_report_common.py | 37 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.osv.orm import setup_modifiers
from openerp.tools.translate import _
class account_common_report(osv.osv_memory):
_name = "account.common.report"
_description = "Account Common Report"
def onchange_chart_id(self, cr, uid, ids, chart_account_id=False, context=None):
res = {}
if chart_account_id:
company_id = self.pool.get('account.account').browse(cr, uid, chart_account_id, context=context).company_id.id
now = time.strftime('%Y-%m-%d')
domain = [('company_id', '=', company_id), ('date_start', '<', now), ('date_stop', '>', now)]
fiscalyears = self.pool.get('account.fiscalyear').search(cr, uid, domain, limit=1)
res['value'] = {'company_id': company_id, 'fiscalyear_id': fiscalyears and fiscalyears[0] or False}
return res
_columns = {
'chart_account_id': fields.many2one('account.account', 'Chart of Account', help='Select Charts of Accounts', required=True, domain = [('parent_id','=',False)]),
'company_id': fields.related('chart_account_id', 'company_id', type='many2one', relation='res.company', string='Company', readonly=True),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', help='Keep empty for all open fiscal year'),
'filter': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods')], "Filter by", required=True),
'period_from': fields.many2one('account.period', 'Start Period'),
'period_to': fields.many2one('account.period', 'End Period'),
'journal_ids': fields.many2many('account.journal', string='Journals', required=True),
'date_from': fields.date("Start Date"),
'date_to': fields.date("End Date"),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _check_company_id(self, cr, uid, ids, context=None):
for wiz in self.browse(cr, uid, ids, context=context):
company_id = wiz.company_id.id
if wiz.fiscalyear_id and company_id != wiz.fiscalyear_id.company_id.id:
return False
if wiz.period_from and company_id != wiz.period_from.company_id.id:
return False
if wiz.period_to and company_id != wiz.period_to.company_id.id:
return False
return True
_constraints = [
(_check_company_id, 'The fiscalyear, periods or chart of account chosen have to belong to the same company.', ['chart_account_id','fiscalyear_id','period_from','period_to']),
]
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        if context is None:
            context = {}
res = super(account_common_report, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
if context.get('active_model', False) == 'account.account':
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='chart_account_id']")
for node in nodes:
node.set('readonly', '1')
node.set('help', 'If you print the report from Account list/form view it will not consider Charts of account')
setup_modifiers(node, res['fields']['chart_account_id'])
res['arch'] = etree.tostring(doc)
return res
def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
res = {'value': {}}
if filter == 'filter_no':
            res['value'] = {'period_from': False, 'period_to': False, 'date_from': False, 'date_to': False}
if filter == 'filter_date':
res['value'] = {'period_from': False, 'period_to': False, 'date_from': time.strftime('%Y-01-01'), 'date_to': time.strftime('%Y-%m-%d')}
if filter == 'filter_period' and fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.special = false
ORDER BY p.date_start ASC, p.special ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
AND p.special = false
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods and len(periods) > 1:
start_period = periods[0]
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period, 'date_from': False, 'date_to': False}
return res
def _get_account(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
accounts = self.pool.get('account.account').search(cr, uid, [('parent_id', '=', False), ('company_id', '=', user.company_id.id)], limit=1)
return accounts and accounts[0] or False
def _get_fiscalyear(self, cr, uid, context=None):
if context is None:
context = {}
now = time.strftime('%Y-%m-%d')
company_id = False
ids = context.get('active_ids', [])
if ids and context.get('active_model') == 'account.account':
company_id = self.pool.get('account.account').browse(cr, uid, ids[0], context=context).company_id.id
else: # use current company id
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
domain = [('company_id', '=', company_id), ('date_start', '<', now), ('date_stop', '>', now)]
fiscalyears = self.pool.get('account.fiscalyear').search(cr, uid, domain, limit=1)
return fiscalyears and fiscalyears[0] or False
def _get_all_journal(self, cr, uid, context=None):
        return self.pool.get('account.journal').search(cr, uid, [])
_defaults = {
'fiscalyear_id': _get_fiscalyear,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.common.report',context=c),
'journal_ids': _get_all_journal,
'filter': 'filter_no',
'chart_account_id': _get_account,
'target_move': 'posted',
}
def _build_contexts(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
result = {}
result['fiscalyear'] = 'fiscalyear_id' in data['form'] and data['form']['fiscalyear_id'] or False
result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
result['chart_account_id'] = 'chart_account_id' in data['form'] and data['form']['chart_account_id'] or False
result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
if data['form']['filter'] == 'filter_date':
result['date_from'] = data['form']['date_from']
result['date_to'] = data['form']['date_to']
elif data['form']['filter'] == 'filter_period':
if not data['form']['period_from'] or not data['form']['period_to']:
raise osv.except_osv(_('Error!'),_('Select a starting and an ending period.'))
result['period_from'] = data['form']['period_from']
result['period_to'] = data['form']['period_to']
return result
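    # Hypothetical shape of the ``data`` argument consumed above (a sketch;
    # the field names match the read() call in check_report below):
    #
    #     data = {'form': {'filter': 'filter_date',
    #                      'date_from': '2013-01-01', 'date_to': '2013-12-31',
    #                      'fiscalyear_id': 1, 'journal_ids': [1, 2],
    #                      'chart_account_id': 1, 'target_move': 'posted'}}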
def _print_report(self, cr, uid, ids, data, context=None):
        raise osv.except_osv(_('Error!'), _('Not implemented.'))
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = {}
data['ids'] = context.get('active_ids', [])
data['model'] = context.get('active_model', 'ir.ui.menu')
data['form'] = self.read(cr, uid, ids, ['date_from', 'date_to', 'fiscalyear_id', 'journal_ids', 'period_from', 'period_to', 'filter', 'chart_account_id', 'target_move'], context=context)[0]
for field in ['fiscalyear_id', 'chart_account_id', 'period_from', 'period_to']:
if isinstance(data['form'][field], tuple):
data['form'][field] = data['form'][field][0]
used_context = self._build_contexts(cr, uid, ids, data, context=context)
data['form']['periods'] = used_context.get('periods', False) and used_context['periods'] or []
data['form']['used_context'] = dict(used_context, lang=context.get('lang', 'en_US'))
return self._print_report(cr, uid, ids, data, context=context)
account_common_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
drnextgis/QGIS | refs/heads/master | python/ext-libs/requests/utils.py | 148 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = 0
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
total_length = len(o.getvalue())
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
try:
current_position = o.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
current_position = total_length
return max(0, total_length - current_position)
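# A minimal illustration of super_len (hypothetical file name, not part of
# requests): a partially read binary file reports only the bytes left to
# send, because the current tell() position is subtracted from the size.
#
#     with open('payload.bin', 'rb') as f:
#         f.read(10)
#         super_len(f)  # file size minus the 10 bytes already consumed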
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
        # Strip port numbers from netloc. This weird `if...encode` dance is
# used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
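# A sketch of the behaviour described above (hypothetical URL): '%7E' decodes
# to '~', which is unreserved, so it is un-escaped; '%2F' decodes to '/',
# which is reserved, so it stays percent-encoded.
#
#     unquote_unreserved('http://example.com/%7Euser/a%2Fb')
#     # -> 'http://example.com/~user/a%2Fb'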
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""
    This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
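# Worked example of the bit arithmetic above, for mask=24:
#     (1 << (32 - 24)) - 1    == 0x000000ff   (the 8 host bits)
#     0xffffffff ^ 0x000000ff == 0xffffff00   -> '255.255.255.0'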
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
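# A hypothetical sketch of the no_proxy handling above: CIDR entries are
# matched against literal IPv4 hosts, every other entry by suffix.
#
#     os.environ['no_proxy'] = '192.168.1.0/24,.internal.example.com'
#     should_bypass_proxies('http://192.168.1.7/')               # True (CIDR)
#     should_bypass_proxies('http://api.internal.example.com/')  # True (suffix)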
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
    :param url: The URL being requested
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
proxy = None
else:
proxy = proxies.get(urlparts.scheme+'://'+urlparts.hostname)
if proxy is None:
proxy = proxies.get(urlparts.scheme)
return proxy
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
return '%s/%s' % (name, __version__)
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = ' \'"'
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
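# A sketch of the parse above for a single Link entry (hypothetical URL):
#
#     parse_header_links('<http://x/a.jpeg>; rel=front; type="image/jpeg"')
#     # -> [{'url': 'http://x/a.jpeg', 'rel': 'front', 'type': 'image/jpeg'}]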
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
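# Illustration of the null-count heuristic above: '{}' encoded as UTF-16-LE
# is b'{\x00}\x00', so the 2nd and 4th bytes are null and the heuristic
# picks the little-endian branch.
#
#     guess_json_utf(b'{\x00}\x00')  # -> 'utf-16-le'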
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument."""
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
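# A sketch of the netloc/path swap above (hypothetical host): without a
# scheme, urlparse puts the host into ``path``, so it is moved back.
#
#     prepend_scheme_if_needed('example.com/pub', 'http')
#     # -> 'http://example.com/pub'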
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def urldefragauth(url):
"""
    Given a url, remove the fragment and the authentication part
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
|
morenopc/edx-platform | refs/heads/ok-merge-from-master | lms/envs/cms/acceptance.py | 52 | """
This config file is a copy of the dev environment without the Debug
Toolbar. It is suitable for running acceptance tests.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .dev import *
# REMOVE DEBUG TOOLBAR
INSTALLED_APPS = tuple(e for e in INSTALLED_APPS if e != 'debug_toolbar')
INSTALLED_APPS = tuple(e for e in INSTALLED_APPS if e != 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES = tuple(e for e in MIDDLEWARE_CLASSES \
if e != 'debug_toolbar.middleware.DebugToolbarMiddleware')
########################### LETTUCE TESTING ##########################
FEATURES['DISPLAY_TOY_COURSES'] = True
INSTALLED_APPS += ('lettuce.django',)
# INSTALLED_APPS += ('portal',)
LETTUCE_APPS = ('portal',) # dummy app covers the home page, login, registration, and course enrollment
|
odejesush/tensorflow | refs/heads/master | venv/lib/python2.7/site-packages/setuptools/command/install_lib.py | 431 | import os
import imp
from itertools import product, starmap
import distutils.command.install_lib as orig
class install_lib(orig.install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
"""
Return a collections.Sized collections.Container of paths to be
excluded for single_version_externally_managed installations.
"""
all_packages = (
pkg
for ns_pkg in self._get_SVEM_NSPs()
for pkg in self._all_packages(ns_pkg)
)
excl_specs = product(all_packages, self._gen_exclusion_paths())
return set(starmap(self._exclude_pkg_path, excl_specs))
def _exclude_pkg_path(self, pkg, exclusion_path):
"""
Given a package name and exclusion path within that package,
compute the full exclusion path.
"""
parts = pkg.split('.') + [exclusion_path]
return os.path.join(self.install_dir, *parts)
@staticmethod
def _all_packages(pkg_name):
"""
>>> list(install_lib._all_packages('foo.bar.baz'))
['foo.bar.baz', 'foo.bar', 'foo']
"""
while pkg_name:
yield pkg_name
pkg_name, sep, child = pkg_name.rpartition('.')
def _get_SVEM_NSPs(self):
"""
Get namespace packages (list) but only for
single_version_externally_managed installations and empty otherwise.
"""
# TODO: is it necessary to short-circuit here? i.e. what's the cost
# if get_finalized_command is called even when namespace_packages is
# False?
if not self.distribution.namespace_packages:
return []
install_cmd = self.get_finalized_command('install')
svem = install_cmd.single_version_externally_managed
return self.distribution.namespace_packages if svem else []
@staticmethod
def _gen_exclusion_paths():
"""
Generate file paths to be excluded for namespace packages (bytecode
cache files).
"""
# always exclude the package module itself
yield '__init__.py'
yield '__init__.pyc'
yield '__init__.pyo'
if not hasattr(imp, 'get_tag'):
return
base = os.path.join('__pycache__', '__init__.' + imp.get_tag())
yield base + '.pyc'
yield base + '.pyo'
yield base + '.opt-1.pyc'
yield base + '.opt-2.pyc'
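    # For illustration (hypothetical bytecode tag): with imp.get_tag()
    # returning 'cpython-35', the generator above yields '__init__.py',
    # '__init__.pyc', '__init__.pyo', then the __pycache__ variants
    # '__init__.cpython-35.pyc', '.pyo', '.opt-1.pyc' and '.opt-2.pyc'.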
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return orig.install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",
dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = orig.install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
|
gavinzbq/synonym | refs/heads/master | synonym/__init__.py | 1 | # -*- coding: utf-8 -*-
"""Top-level package for synonym."""
__author__ = """Shanyun Gao"""
__email__ = 'shanyun@g.clemson.edu'
__version__ = '0.1.3'
|
dalou/django-extended | refs/heads/master | django_extended/forms/geo.py | 5 | # # encoding: utf-8
# from django import forms
# from django.utils.translation import ugettext_lazy as _
# from ..widgets.geo import LocationInput
# class LocationField(forms.CharField):
# def __init__(self, *args, **kwargs):
# if 'widget' not in kwargs:
# kwargs['widget'] = LocationInput(
# attrs={'placeholder': _(u'Saisissez une adresse')}
# )
# super(LocationField, self).__init__(*args, **kwargs)
|
Panos512/invenio | refs/heads/prod | modules/websearch/web/admin/websearchadmin.py | 25 | ## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio WebSearch Administrator Interface."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
import invenio.websearchadminlib as wsc
from invenio.bibrankadminlib import check_user
from invenio.webpage import page, adderrorbox, error_page
from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_SITE_LANG, CFG_SITE_NAME
from invenio.webuser import getUid, page_not_authorized
from invenio.messages import gettext_set_language
from invenio.urlutils import wash_url_argument
def switchfmtscore(req, colID, type, id_1, id_2, ln=CFG_SITE_LANG):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_switchfmtscore(colID=colID,
ln=ln,
type=type,
id_1=id_1,
id_2=id_2),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def switchfldscore(req, colID, id_1, id_2, fmeth, ln=CFG_SITE_LANG):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_switchfldscore(colID=colID,
ln=ln,
id_1=id_1,
id_2=id_2,
fmeth=fmeth),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def switchfldvaluescore(req, colID, id_1, id_fldvalue_1, id_fldvalue_2, ln=CFG_SITE_LANG):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_switchfldvaluescore(colID=colID,
ln=ln,
id_1=id_1,
id_fldvalue_1=id_fldvalue_1,
id_fldvalue_2=id_fldvalue_2),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def runwebcoll(req, colID, ln=CFG_SITE_LANG, confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="WebSearch Admin",
body=wsc.perform_checkwebcollstatus(colID=colID,
ln=ln,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def switchpbxscore(req, colID, id_1, id_2, sel_ln, ln=CFG_SITE_LANG):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_switchpbxscore(colID=colID,
ln=ln,
id_1=id_1,
id_2=id_2,
sel_ln=sel_ln),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifydbquery(req, colID, ln=CFG_SITE_LANG, dbquery='', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_modifydbquery(colID=colID,
ln=ln,
dbquery=dbquery,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def showtree(req, colID, ln=CFG_SITE_LANG):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Collection tree",
body=wsc.perform_showtree(colID=colID,
ln=ln),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifytranslations(req, colID, ln=CFG_SITE_LANG, sel_type='', trans = [], confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_modifytranslations(colID=colID,
ln=ln,
sel_type=sel_type,
trans=trans,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def addcollectiontotree(req, colID, ln=CFG_SITE_LANG, add_dad='', add_son='', rtype='', mtype='', callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="WebSearch Admin",
body=wsc.perform_addcollectiontotree(colID=colID,
                                                         ln=ln,
add_dad=add_dad,
add_son=add_son,
rtype=rtype,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
navtrail = navtrail_previous_links,
req=req,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def addcollection(req, colID, ln=CFG_SITE_LANG, colNAME='', dbquery='', callback="yes", confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="WebSearch Admin",
body=wsc.perform_addcollection(colID=colID,
                                               ln=ln,
colNAME=colNAME,
dbquery=dbquery,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
navtrail = navtrail_previous_links,
req=req,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifyrankmethods(req, colID, ln=CFG_SITE_LANG, func='', rnkID='', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_modifyrankmethods(colID=colID,
ln=ln,
func=func,
rnkID=rnkID,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def deletecollection(req, colID, ln=CFG_SITE_LANG, confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_deletecollection(colID=colID,
ln=ln,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def editcollection(req, colID=1, ln=CFG_SITE_LANG, mtype=''):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_editcollection(colID=colID,
ln=ln,
mtype=mtype),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def checkexternalcollections(req, colID, ln=CFG_SITE_LANG, icl=None, update="", confirm=0, callback='yes'):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="WebSearch Admin",
body=wsc.perform_checkexternalcollections(colID=colID,
ln=ln,
icl=icl,
update=update,
confirm=confirm,
callback=callback),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def showoutputformats(req, colID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_showoutputformats(colID=colID,
ln=ln,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def addexistingoutputformat(req, colID, ln=CFG_SITE_LANG, fmtID=-1, callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_addexistingoutputformat(colID=colID,
ln=ln,
fmtID=fmtID,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def deleteoutputformat(req, colID, ln=CFG_SITE_LANG, fmtID=-1, callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_deleteoutputformat(colID=colID,
ln=ln,
fmtID=fmtID,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def removeoutputformat(req, colID, ln=CFG_SITE_LANG, fmtID='', callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_removeoutputformat(colID=colID,
ln=ln,
fmtID=fmtID,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def update_external_collections(req, colID, ln=CFG_SITE_LANG, state=None, recurse=None):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body = wsc.perform_update_external_collections(colID, ln, state, recurse),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def update_detailed_record_options(req, colID, ln=CFG_SITE_LANG, tabs=[], recurse=0):
"""Update the preferences for the tab to show/hide in the detailed record page. """
_tabs = wash_url_argument(tabs, 'list')
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body = wsc.perform_update_detailed_record_options(colID, ln, _tabs, recurse),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def removefieldvalue(req, colID, ln=CFG_SITE_LANG, fldID='', fldvID='', fmeth='', callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_removefieldvalue(colID=colID,
ln=ln,
fldID=fldID,
fldvID=fldvID,
fmeth=fmeth,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def removefield(req, colID, ln=CFG_SITE_LANG, fldID='', fldvID='', fmeth='', callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_removefield(colID=colID,
ln=ln,
fldID=fldID,
fldvID=fldvID,
fmeth=fmeth,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifyfield(req, colID, fldID, fldvID='', ln=CFG_SITE_LANG, callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_modifyfield(colID=colID,
fldID=fldID,
fldvID=fldvID,
ln=ln,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def showsearchoptions(req, colID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_showsearchoptions(colID=colID,
ln=ln,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def addexistingfield(req, colID, ln=CFG_SITE_LANG, fldID=-1, fldvID=-1, fmeth='', callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_addexistingfield(colID=colID,
ln=ln,
fldID=fldID,
fldvID=fldvID,
fmeth=fmeth,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page(title='Authorization failure',
uid=uid,
body=adderrorbox('try to login first',
datalist=["""You are not a user authorized to perform admin tasks, try to
<a href="%s/youraccount/login?referer=%s/admin/websearch/websearchadmin.py/">login</a> with another account.""" % (CFG_SITE_SECURE_URL, CFG_SITE_URL)]),
navtrail= navtrail_previous_links,
lastupdated=__lastupdated__)
def rearrangefield(req, colID, ln=CFG_SITE_LANG, fmeth='', callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_rearrangefield(colID=colID,
ln=ln,
fmeth=fmeth,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page(title='Authorization failure',
uid=uid,
body=adderrorbox('try to login first',
datalist=["""You are not a user authorized to perform admin tasks, try to
<a href="%s/youraccount/login?referer=%s/admin/websearch/websearchadmin.py/">login</a> with another account.""" % (CFG_SITE_SECURE_URL, CFG_SITE_URL)]),
navtrail= navtrail_previous_links,
lastupdated=__lastupdated__)
def addexistingfieldvalue(req, colID, fldID, ln=CFG_SITE_LANG, callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_addexistingfieldvalue(colID=colID,
ln=ln,
fldID=fldID,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page(title='Authorization failure',
uid=uid,
body=adderrorbox('try to login first',
datalist=["""You are not a user authorized to perform admin tasks, try to
<a href="%s/youraccount/login?referer=%s/admin/websearch/websearchadmin.py/">login</a> with another account.""" % (CFG_SITE_SECURE_URL, CFG_SITE_URL)]),
navtrail= navtrail_previous_links,
lastupdated=__lastupdated__)
def rearrangefieldvalue(req, colID, fldID, ln=CFG_SITE_LANG, callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_rearrangefieldvalue(colID=colID,
ln=ln,
fldID=fldID,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page(title='Authorization failure',
uid=uid,
body=adderrorbox('try to login first',
datalist=["""You are not a user authorized to perform admin tasks, try to
<a href="%s/youraccount/login?referer=%s/admin/websearch/websearchadmin.py/">login</a> with another account.""" % (CFG_SITE_SECURE_URL, CFG_SITE_URL)]),
navtrail= navtrail_previous_links,
lastupdated=__lastupdated__)
def addnewfieldvalue(req, colID, fldID, ln=CFG_SITE_LANG, name='', value='', callback="yes", confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_addnewfieldvalue(colID=colID,
fldID=fldID,
ln=ln,
name=name,
value=value,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
navtrail = navtrail_previous_links,
req=req,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifyfieldvalue(req, colID, fldID, fldvID, ln=CFG_SITE_LANG, name='', value='', callback="yes", confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_modifyfieldvalue(colID=colID,
fldID=fldID,
fldvID=fldvID,
ln=ln,
name=name,
value=value,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
navtrail = navtrail_previous_links,
req=req,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def showsearchfields(req, colID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_showsearchfields(colID=colID,
ln=ln,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def showsortoptions(req, colID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_showsortoptions(colID=colID,
ln=ln,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifyportalbox(req, colID, ln=CFG_SITE_LANG, pbxID=-1, score='', position='', sel_ln='', title='', body='', callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_modifyportalbox(colID=colID,
ln=ln,
pbxID=pbxID,
score=score,
position=position,
sel_ln=sel_ln,
title=title,
body=body,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def removeportalbox(req, colID, ln=CFG_SITE_LANG, pbxID='', sel_ln='', callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_removeportalbox(colID=colID,
ln=ln,
pbxID=pbxID,
sel_ln=sel_ln,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def addexistingportalbox(req, colID, ln=CFG_SITE_LANG, pbxID=-1, score=0, position='', sel_ln='', callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_addexistingportalbox(colID=colID,
ln=ln,
pbxID=pbxID,
score=score,
position=position,
sel_ln=sel_ln,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page(title='Authorization failure',
uid=uid,
body=adderrorbox('try to login first',
datalist=["""You are not a user authorized to perform admin tasks, try to
<a href="%s/youraccount/login?referer=%s/admin/websearch/websearchadmin.py/">login</a> with another account.""" % (CFG_SITE_SECURE_URL, CFG_SITE_URL)]),
navtrail= navtrail_previous_links,
lastupdated=__lastupdated__)
def deleteportalbox(req, colID, ln=CFG_SITE_LANG, pbxID=-1, callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_deleteportalbox(colID=colID,
ln=ln,
pbxID=pbxID,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def showportalboxes(req, colID, ln=CFG_SITE_LANG, callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_showportalboxes(colID=colID,
ln=ln,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def addportalbox(req, colID, ln=CFG_SITE_LANG, title='', body='', callback='yes', confirm=-1):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="Edit Collection",
body=wsc.perform_addportalbox(colID=colID,
ln=ln,
title=title,
body=body,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifycollectiontree(req, colID, ln=CFG_SITE_LANG, move_up='', move_down='', move_from='', move_to='', delete='', rtype='', callback='yes', confirm=0):
navtrail_previous_links = wsc.getnavtrail() + """> <a class="navtrail" href="%s/admin/websearch/websearchadmin.py/">WebSearch Admin</a> """ % (CFG_SITE_URL)
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="WebSearch Admin",
body=wsc.perform_modifycollectiontree(colID=colID,
ln=ln,
move_up=move_up,
move_down=move_down,
move_from=move_from,
move_to=move_to,
delete=delete,
rtype=rtype,
callback=callback,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def index(req, colID=1, ln=CFG_SITE_LANG, mtype='', content='', confirm=0):
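"""Render the WebSearch Admin start page (summary inferred from the perform_index call below)."""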
navtrail_previous_links = wsc.getnavtrail()
try:
uid = getUid(req)
except:
return error_page('Error', req)
auth = check_user(req,'cfgwebsearch')
if not auth[0]:
return page(title="WebSearch Admin",
body=wsc.perform_index(colID=colID,
ln=ln,
mtype=mtype,
content=content,
confirm=confirm),
uid=uid,
language=ln,
req=req,
navtrail = navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
|
timblankers/inside-bgp | refs/heads/master | graph.py | 1 | #!/usr/bin/env python3
import sys
import os
import numpy as np  # only needed by the commented-out correlation snippet below
import matplotlib as mpl
mpl.use('Agg')  # pick a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
from collections import defaultdict
def main():
total_announcements = []
total_ipv4 = []
total_ipv6 = []
announcements = defaultdict(list)
enc='iso-8859-15'
n_files = 10  # must equal the number of .graph files walked below; the per-key lists are preallocated to this length
city = "amsterdam"
graph_type = "prefix" # prefix as_path_prefix as_path origin_announcement
start_year = 2013
end_year = 2013
if graph_type == "prefix":
display = ['84.205.74.0/24', '2001:7fb:fe0a::/48']
graph_type_total = "total_prefixes"
elif graph_type == "as_path_prefix":
display = ['']
graph_type_total = "total_as_paths_prefixes"
elif graph_type == "as_path":
display = ['']
graph_type_total = "total_as_paths"
elif graph_type == "origin_announcement":
display = ['3549', '3257', '12880']
graph_type_total = "total_origin_announcements"
days = 0
directories = []
for arg in sys.argv:
if arg != sys.argv[0]:
if not os.path.isdir(arg):
print(">> Error: %s not found" % arg)
else:
directories.append(arg)
if len(directories) == 0:
directories = ["graph_data"]
for directory in directories:
for root, dir_names, files in os.walk(directory):
path = root.split('/')
print((len(path) - 1) * '---', os.path.basename(root))
for update_file in files:
if update_file.endswith(".graph"):
print(len(path) * '---', update_file)
input_file = open(os.path.join(root,update_file), "r", encoding=enc)
ipv4 = 0
ipv6 = 0
for line in input_file:
fields = line.strip().split("|")
if fields[0] == graph_type:
if len(announcements[fields[1]]) == 0:
announcements[fields[1]] = [0] * n_files
announcements[fields[1]][days] = fields[2]
if (graph_type == "prefix"):
if ":" in fields[1]:
ipv6 += int(fields[2])
else:
ipv4 += int(fields[2])
if fields[0] == graph_type_total:
total_announcements.append(fields[1])
total_ipv4.append(ipv4)
total_ipv6.append(ipv6)
input_file.close()
days += 1
dates = []
for year in range(start_year, end_year+1):
# dates.append("01-01-%d" % year)
# for j in range(1, 30):
# dates.append("")
# dates.append("01-02-%d" % year)
# for j in range(1, 28):
# dates.append("")
# dates.append("01-03-%d" % year)
# for j in range(1, 29):
# dates.append("")
dates.append("01-04-%d" % year)
for j in range(1, 30):
dates.append("")
dates.append("01-05-%d" % year)
for j in range(1, 30):
dates.append("")
# dates.append("01-06-%d" % year)
# for j in range(1, 30):
# dates.append("")
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_xticks(range(len(dates)))
ax.xaxis.set_ticklabels(dates)
total_announcements = list(map(int, total_announcements))
print(total_announcements)
ax.plot(total_announcements, label=('Total'))
# if (graph_type == "prefix"):
# total_ipv4 = list(map(int, total_ipv4))
# total_ipv6 = list(map(int, total_ipv6))
# ax.plot(total_ipv4, label=('ipv4'))
# ax.plot(total_ipv6, label=('ipv6'))
# full_time = [0] * days
# full_time_amount = 0
# part_time = [0] * days
# part_time_amount = 0
prefix_count = [0] * n_files
full_time_prefix_count = [0] * n_files
test_set = [0] * n_files
test_set2 = [0] * n_files
for key, val in announcements.items():
val = list(map(int, val))
for day in range(0,days):
if val[day] != 0:
prefix_count[day] += 1
if val.count(0) < 2:
full_time_prefix_count[day] += 1
if val[day] < 30:
test_set[day] += val[day]
test_set2[day] += 1
#cc = np.corrcoef(total_announcements, val)
#cc = cc[0][1]
#print("AS: %s\tCC: %s" % (key, cc))
# if 0 not in val or val.count(0) < 6:
# full_time_amount += 1
# full_time = [a+b for a,b in zip(full_time, val)]
# else:
# part_time_amount += 1
# part_time = [a+b for a,b in zip(part_time, val)]
# ax.plot(full_time, label="Full Time Talkers")
# ax.plot(part_time, label="part time talking")
print(prefix_count)
print(full_time_prefix_count)
ax.plot(prefix_count, label=('Prefix Count'))
# ax.plot(full_time_prefix_count, label=('Full Time Prefixes'))
ax.plot(test_set, label=('Test Set'))
ax.plot(test_set2, label=('Test Set Count'))
# print("amount of as always talking:", full_time_amount)
# print("amount of as sometimes talking:", part_time_amount)
legend = plt.legend(loc='upper right', fancybox=True)
legend.get_frame().set_alpha(0.5)
for label in legend.get_lines():
label.set_linewidth(4.0)
plt.xlabel('Year')
plt.ylabel('Amount')
plt.yscale('log')
plt.gcf().autofmt_xdate()
plt.grid(True, which='major')
plt.title(city.title())
#plt.show()
fig.savefig('graph.png', bbox_inches='tight', dpi=200)
if __name__ == '__main__':
main()
|
techtonik/numpy | refs/heads/master | numpy/lib/tests/test_type_check.py | 6 | from __future__ import division, absolute_import, print_function
from numpy.lib import *
from numpy.core import *
from numpy.random import rand
from numpy.compat import asbytes, long
from numpy.testing import (
TestCase, assert_, assert_equal, assert_array_equal, run_module_suite)
try:
import ctypes
_HAS_CTYPE = True
except ImportError:
_HAS_CTYPE = False
def assert_all(x):
assert_(all(x), x)
class TestCommonType(TestCase):
def test_basic(self):
ai32 = array([[1, 2], [3, 4]], dtype=int32)
af32 = array([[1, 2], [3, 4]], dtype=float32)
af64 = array([[1, 2], [3, 4]], dtype=float64)
acs = array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=csingle)
acd = array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=cdouble)
assert_(common_type(af32) == float32)
assert_(common_type(af64) == float64)
assert_(common_type(acs) == csingle)
assert_(common_type(acd) == cdouble)
class TestMintypecode(TestCase):
def test_default_1(self):
for itype in '1bcsuwil':
assert_equal(mintypecode(itype), 'd')
assert_equal(mintypecode('f'), 'f')
assert_equal(mintypecode('d'), 'd')
assert_equal(mintypecode('F'), 'F')
assert_equal(mintypecode('D'), 'D')
def test_default_2(self):
for itype in '1bcsuwil':
assert_equal(mintypecode(itype+'f'), 'f')
assert_equal(mintypecode(itype+'d'), 'd')
assert_equal(mintypecode(itype+'F'), 'F')
assert_equal(mintypecode(itype+'D'), 'D')
assert_equal(mintypecode('ff'), 'f')
assert_equal(mintypecode('fd'), 'd')
assert_equal(mintypecode('fF'), 'F')
assert_equal(mintypecode('fD'), 'D')
assert_equal(mintypecode('df'), 'd')
assert_equal(mintypecode('dd'), 'd')
#assert_equal(mintypecode('dF',savespace=1),'F')
assert_equal(mintypecode('dF'), 'D')
assert_equal(mintypecode('dD'), 'D')
assert_equal(mintypecode('Ff'), 'F')
#assert_equal(mintypecode('Fd',savespace=1),'F')
assert_equal(mintypecode('Fd'), 'D')
assert_equal(mintypecode('FF'), 'F')
assert_equal(mintypecode('FD'), 'D')
assert_equal(mintypecode('Df'), 'D')
assert_equal(mintypecode('Dd'), 'D')
assert_equal(mintypecode('DF'), 'D')
assert_equal(mintypecode('DD'), 'D')
def test_default_3(self):
assert_equal(mintypecode('fdF'), 'D')
#assert_equal(mintypecode('fdF',savespace=1),'F')
assert_equal(mintypecode('fdD'), 'D')
assert_equal(mintypecode('fFD'), 'D')
assert_equal(mintypecode('dFD'), 'D')
assert_equal(mintypecode('ifd'), 'd')
assert_equal(mintypecode('ifF'), 'F')
assert_equal(mintypecode('ifD'), 'D')
assert_equal(mintypecode('idF'), 'D')
#assert_equal(mintypecode('idF',savespace=1),'F')
assert_equal(mintypecode('idD'), 'D')
class TestIsscalar(TestCase):
def test_basic(self):
assert_(isscalar(3))
assert_(not isscalar([3]))
assert_(not isscalar((3,)))
assert_(isscalar(3j))
assert_(isscalar(long(10)))
assert_(isscalar(4.0))
class TestReal(TestCase):
def test_real(self):
y = rand(10,)
assert_array_equal(y, real(y))
def test_cmplx(self):
y = rand(10,)+1j*rand(10,)
assert_array_equal(y.real, real(y))
class TestImag(TestCase):
def test_real(self):
y = rand(10,)
assert_array_equal(0, imag(y))
def test_cmplx(self):
y = rand(10,)+1j*rand(10,)
assert_array_equal(y.imag, imag(y))
class TestIscomplex(TestCase):
def test_fail(self):
z = array([-1, 0, 1])
res = iscomplex(z)
assert_(not sometrue(res, axis=0))
def test_pass(self):
z = array([-1j, 1, 0])
res = iscomplex(z)
assert_array_equal(res, [1, 0, 0])
class TestIsreal(TestCase):
def test_pass(self):
z = array([-1, 0, 1j])
res = isreal(z)
assert_array_equal(res, [1, 1, 0])
def test_fail(self):
z = array([-1j, 1, 0])
res = isreal(z)
assert_array_equal(res, [0, 1, 1])
class TestIscomplexobj(TestCase):
def test_basic(self):
z = array([-1, 0, 1])
assert_(not iscomplexobj(z))
z = array([-1j, 0, -1])
assert_(iscomplexobj(z))
class TestIsrealobj(TestCase):
def test_basic(self):
z = array([-1, 0, 1])
assert_(isrealobj(z))
z = array([-1j, 0, -1])
assert_(not isrealobj(z))
class TestIsnan(TestCase):
def test_goodvalues(self):
z = array((-1., 0., 1.))
res = isnan(z) == 0
assert_all(alltrue(res, axis=0))
def test_posinf(self):
with errstate(divide='ignore'):
assert_all(isnan(array((1.,))/0.) == 0)
def test_neginf(self):
with errstate(divide='ignore'):
assert_all(isnan(array((-1.,))/0.) == 0)
def test_ind(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isnan(array((0.,))/0.) == 1)
#def test_qnan(self): log(-1) returns pi*j now
# assert_all(isnan(log(-1.)) == 1)
def test_integer(self):
assert_all(isnan(1) == 0)
def test_complex(self):
assert_all(isnan(1+1j) == 0)
def test_complex1(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isnan(array(0+0j)/0.) == 1)
class TestIsfinite(TestCase):
def test_goodvalues(self):
z = array((-1., 0., 1.))
res = isfinite(z) == 1
assert_all(alltrue(res, axis=0))
def test_posinf(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isfinite(array((1.,))/0.) == 0)
def test_neginf(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isfinite(array((-1.,))/0.) == 0)
def test_ind(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isfinite(array((0.,))/0.) == 0)
#def test_qnan(self):
# assert_all(isfinite(log(-1.)) == 0)
def test_integer(self):
assert_all(isfinite(1) == 1)
def test_complex(self):
assert_all(isfinite(1+1j) == 1)
def test_complex1(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isfinite(array(1+1j)/0.) == 0)
class TestIsinf(TestCase):
def test_goodvalues(self):
z = array((-1., 0., 1.))
res = isinf(z) == 0
assert_all(alltrue(res, axis=0))
def test_posinf(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array((1.,))/0.) == 1)
def test_posinf_scalar(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array(1.,)/0.) == 1)
def test_neginf(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array((-1.,))/0.) == 1)
def test_neginf_scalar(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array(-1.)/0.) == 1)
def test_ind(self):
with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array((0.,))/0.) == 0)
#def test_qnan(self):
# assert_all(isinf(log(-1.)) == 0)
# assert_all(isnan(log(-1.)) == 1)
class TestIsposinf(TestCase):
def test_generic(self):
with errstate(divide='ignore', invalid='ignore'):
vals = isposinf(array((-1., 0, 1))/0.)
assert_(vals[0] == 0)
assert_(vals[1] == 0)
assert_(vals[2] == 1)
class TestIsneginf(TestCase):
def test_generic(self):
with errstate(divide='ignore', invalid='ignore'):
vals = isneginf(array((-1., 0, 1))/0.)
assert_(vals[0] == 1)
assert_(vals[1] == 0)
assert_(vals[2] == 0)
class TestNanToNum(TestCase):
def test_generic(self):
with errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(array((-1., 0, 1))/0.)
# Split from a chained ``and``: assert_all() returns None, so the
# second check after each ``and`` was never actually evaluated.
assert_all(vals[0] < -1e10)
assert_all(isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10)
assert_all(isfinite(vals[2]))
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
def test_complex_good(self):
vals = nan_to_num(1+1j)
assert_all(vals == 1+1j)
def test_complex_bad(self):
with errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += array(0+1.j)/0.
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
assert_all(isfinite(vals))
def test_complex_bad2(self):
with errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += array(-1+1.j)/0.
vals = nan_to_num(v)
assert_all(isfinite(vals))
#assert_all(vals.imag > 1e10) and assert_all(isfinite(vals))
# !! This is actually (unexpectedly) positive
# !! inf. Comment out for now, and see if it
# !! changes
#assert_all(vals.real < -1e10) and assert_all(isfinite(vals))
class TestRealIfClose(TestCase):
def test_basic(self):
a = rand(10)
b = real_if_close(a+1e-15j)
assert_all(isrealobj(b))
assert_array_equal(a, b)
b = real_if_close(a+1e-7j)
assert_all(iscomplexobj(b))
b = real_if_close(a+1e-7j, tol=1e-6)
assert_all(isrealobj(b))
class TestArrayConversion(TestCase):
def test_asfarray(self):
a = asfarray(array([1, 2, 3]))
assert_equal(a.__class__, ndarray)
assert_(issubdtype(a.dtype, float))
if __name__ == "__main__":
run_module_suite()
|
brownharryb/erpnext | refs/heads/develop | erpnext/hr/doctype/attendance/test_attendance.py | 113 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_records = frappe.get_test_records('Attendance')
class TestAttendance(unittest.TestCase):
pass
|
Allow2CEO/browser-ios | refs/heads/master | brave/node_modules/bloom-filter-cpp/vendor/depot_tools/third_party/protobuf26/message.py | 57 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = 'robinson@google.com (Will Robinson)'
class Error(Exception): pass
class DecodeError(Error): pass
class EncodeError(Error): pass
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
TODO(robinson): Link to an HTML document here.
TODO(robinson): Document that instances of this class will also
have an Extensions attribute with __getitem__ and __setitem__.
Again, not sure how to best convey this.
TODO(robinson): Document that the class must also have a static
RegisterExtension(extension_field) method.
Not sure how to best express at this point.
"""
# TODO(robinson): Document these fields and methods.
__slots__ = []
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg: Message to merge into the current message.
"""
raise NotImplementedError
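# A sketch of the merge semantics on a hypothetical generated type Foo
# (the field names are illustrative, not part of this module):
#
#   a = Foo(name='x', tags=['t1'])
#   b = Foo(tags=['t2'])
#   a.MergeFrom(b)   # a.name == 'x', a.tags == ['t1', 't2']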
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
The method returns True if the message is initialized (i.e. all of its
required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in |serialized| that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
TODO(robinson): Document handling of unknown fields.
Args:
serialized: Any object that allows us to call buffer(serialized)
to access a string of bytes using the buffer interface.
TODO(robinson): When we switch to a helper, this will return None.
Returns:
The number of bytes read from |serialized|.
For non-group messages, this will always be len(serialized),
but for messages which are actually groups, this will
generally be less than len(serialized), since we must
stop when we reach an END_GROUP tag. Note that if
we *do* stop because of an END_GROUP tag, the number
of bytes returned does not include the bytes
for the END_GROUP tag information.
"""
raise NotImplementedError
def ParseFromString(self, serialized):
"""Parse serialized protocol buffer data into this message.
Like MergeFromString(), except we clear the object first and
do not return the value that MergeFromString returns.
"""
self.Clear()
self.MergeFromString(serialized)
def SerializeToString(self):
"""Serializes the protocol message to a binary string.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
message.EncodeError if the message isn't initialized.
"""
raise NotImplementedError
def SerializePartialToString(self):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Returns:
A string representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for all
fields in the message which are not empty. A singular field is non-empty
if HasField() would return true, and a repeated field is non-empty if
it contains at least one element. The fields are ordered by field
number"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message. Note if the
field_name is not defined in the message descriptor, ValueError will be
raised."""
raise NotImplementedError
def ClearField(self, field_name):
raise NotImplementedError
def HasExtension(self, extension_handle):
raise NotImplementedError
def ClearExtension(self, extension_handle):
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
"""
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized'])
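# With the two state hooks above, message instances pickle round-trip via
# their partially-serialized wire form, e.g. (foo is a hypothetical
# generated-message instance):
#
#   import pickle
#   restored = pickle.loads(pickle.dumps(foo))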
|
colinbrislawn/scikit-bio | refs/heads/master | skbio/alignment/tests/__init__.py | 160 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
|
sertac/django | refs/heads/master | django/db/models/fields/subclassing.py | 265 | """
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the metaclass for your Field subclass, implement
to_python() and the other necessary methods and everything will work
seamlessly.
"""
import warnings
from django.utils.deprecation import RemovedInDjango110Warning
class SubfieldBase(type):
"""
A metaclass for custom Field subclasses. This ensures the model's attribute
has the descriptor protocol attached to it.
"""
def __new__(cls, name, bases, attrs):
warnings.warn("SubfieldBase has been deprecated. Use Field.from_db_value instead.",
RemovedInDjango110Warning)
new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
new_class.contribute_to_class = make_contrib(
new_class, attrs.get('contribute_to_class')
)
return new_class
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
return self
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
"""
Returns a suitable contribute_to_class() method for the Field subclass.
If 'func' is passed in, it is the existing contribute_to_class() method on
the subclass and it is called before anything else. It is assumed in this
case that the existing contribute_to_class() calls all the necessary
superclass methods.
"""
def contribute_to_class(self, cls, name, **kwargs):
if func:
func(self, cls, name, **kwargs)
else:
super(superclass, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, Creator(self))
return contribute_to_class
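# A minimal usage sketch of the deprecated SubfieldBase hook (HandField and
# Hand are hypothetical, echoing the old custom-field docs):
#
#   from django.db import models
#   from django.utils import six
#
#   class HandField(six.with_metaclass(SubfieldBase, models.Field)):
#       def to_python(self, value):
#           return value if isinstance(value, Hand) else Hand(value)
#
# With the metaclass attached, assigning to the model attribute goes through
# Creator.__set__ and is coerced by to_python().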
|
cloudera/hue | refs/heads/master | desktop/core/ext-py/Django-1.11.29/django/utils/archive.py | 45 | """
Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import stat
import tarfile
import zipfile
from django.utils import six
class ArchiveException(Exception):
"""
Base exception class for all archive errors.
"""
class UnrecognizedArchiveFormat(ArchiveException):
"""
Error raised when passed file is not a recognized archive format.
"""
def extract(path, to_path=''):
"""
Unpack the tar or zip file at the specified path to the directory
specified by to_path.
"""
with Archive(path) as archive:
archive.extract(to_path)
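# A minimal usage sketch (the paths are hypothetical):
#
#   from django.utils.archive import extract
#   extract('/tmp/project_template.tar.gz', to_path='/tmp/project')
#
# Archive() picks TarArchive or ZipArchive from the file extension via the
# extension_map defined at the bottom of this module.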
class Archive(object):
"""
The external API class that encapsulates an archive implementation.
"""
def __init__(self, file):
self._archive = self._archive_cls(file)(file)
@staticmethod
def _archive_cls(file):
cls = None
if isinstance(file, six.string_types):
filename = file
else:
try:
filename = file.name
except AttributeError:
raise UnrecognizedArchiveFormat(
"File object not a recognized archive format.")
base, tail_ext = os.path.splitext(filename.lower())
cls = extension_map.get(tail_ext)
if not cls:
base, ext = os.path.splitext(base)
cls = extension_map.get(ext)
if not cls:
raise UnrecognizedArchiveFormat(
"Path not a recognized archive format: %s" % filename)
return cls
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def extract(self, to_path=''):
self._archive.extract(to_path)
def list(self):
self._archive.list()
def close(self):
self._archive.close()
class BaseArchive(object):
"""
Base Archive class. Implementations should inherit this class.
"""
@staticmethod
def _copy_permissions(mode, filename):
"""
If the file in the archive has some permissions (this assumes a file
won't be writable/executable without being readable), apply those
permissions to the unarchived file.
"""
if mode & stat.S_IROTH:
os.chmod(filename, mode)
def split_leading_dir(self, path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(self, paths):
"""
Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)
"""
common_prefix = None
for path in paths:
prefix, rest = self.split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
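# For example (illustrative values):
#   has_leading_dir(['pkg/a.py', 'pkg/sub/b.py']) -> True
#   has_leading_dir(['a.py', 'pkg/b.py'])         -> False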
def extract(self):
raise NotImplementedError('subclasses of BaseArchive must provide an extract() method')
def list(self):
raise NotImplementedError('subclasses of BaseArchive must provide a list() method')
class TarArchive(BaseArchive):
def __init__(self, file):
self._archive = tarfile.open(file)
def list(self, *args, **kwargs):
self._archive.list(*args, **kwargs)
def extract(self, to_path):
members = self._archive.getmembers()
leading = self.has_leading_dir(x.name for x in members)
for member in members:
name = member.name
if leading:
name = self.split_leading_dir(name)[1]
filename = os.path.join(to_path, name)
if member.isdir():
if filename and not os.path.exists(filename):
os.makedirs(filename)
else:
# Bind ``extracted`` before the try block so the finally clause below
# cannot hit a NameError when extractfile() raises for the first member.
extracted = None
try:
extracted = self._archive.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
print("In the tar file %s the member %s is invalid: %s" %
(name, member.name, exc))
else:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(extracted, outfile)
self._copy_permissions(member.mode, filename)
finally:
if extracted:
extracted.close()
def close(self):
self._archive.close()
class ZipArchive(BaseArchive):
def __init__(self, file):
self._archive = zipfile.ZipFile(file)
def list(self, *args, **kwargs):
self._archive.printdir(*args, **kwargs)
def extract(self, to_path):
namelist = self._archive.namelist()
leading = self.has_leading_dir(namelist)
for name in namelist:
data = self._archive.read(name)
info = self._archive.getinfo(name)
if leading:
name = self.split_leading_dir(name)[1]
filename = os.path.join(to_path, name)
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if filename.endswith(('/', '\\')):
# A directory
if not os.path.exists(filename):
os.makedirs(filename)
else:
with open(filename, 'wb') as outfile:
outfile.write(data)
# Convert ZipInfo.external_attr to mode
mode = info.external_attr >> 16
self._copy_permissions(mode, filename)
def close(self):
self._archive.close()
extension_map = {
'.tar': TarArchive,
'.tar.bz2': TarArchive,
'.tar.gz': TarArchive,
'.tgz': TarArchive,
'.tz2': TarArchive,
'.zip': ZipArchive,
}
|
w1ll1am23/home-assistant | refs/heads/dev | homeassistant/components/otp/__init__.py | 36 | """The otp component."""
|
0k/OpenUpgrade | refs/heads/8.0 | addons/subscription/__init__.py | 441 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import subscription
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
darkleons/BE | refs/heads/master | addons/l10n_be/wizard/l10n_be_vat_intra.py | 332 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Adapted by Noviat to
# - make the 'mand_id' field optional
# - support Noviat tax code scheme
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.report import report_sxw
class partner_vat_intra(osv.osv_memory):
"""
Partner Vat Intra
"""
_name = "partner.vat.intra"
_description = 'Partner VAT Intra'
def _get_xml_data(self, cr, uid, context=None):
if context.get('file_save', False):
return base64.encodestring(context['file_save'].encode('utf8'))
return ''
def _get_europe_country(self, cursor, user, context=None):
return self.pool.get('res.country').search(cursor, user, [('code', 'in', ['AT', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'GB'])])
_columns = {
'name': fields.char('File Name'),
'period_code': fields.char('Period Code', size=6, required=True, help='''This is where you have to set the period code for the intracom declaration using the format: PPYYYY
PP can stand for a month: from '01' to '12'.
PP can stand for a trimester: '31','32','33','34'
The first figure means that it is a trimester,
The second figure identifies the trimester.
PP can stand for a complete fiscal year: '00'.
YYYY stands for the year (4 positions).
'''
),
'period_ids': fields.many2many('account.period', 'account_period_rel', 'acc_id', 'period_id', 'Period (s)', help = 'Select here the period(s) you want to include in your intracom declaration'),
'tax_code_id': fields.many2one('account.tax.code', 'Company', domain=[('parent_id', '=', False)], help="Keep empty to use the user's company", required=True),
'test_xml': fields.boolean('Test XML file', help="Sets the XML output as test file"),
'mand_id' : fields.char('Reference', help="Reference given by the Representative of the sending company."),
'msg': fields.text('File created', readonly=True),
'no_vat': fields.text('Partner With No VAT', readonly=True, help="The Partner whose VAT number is not defined and they are not included in XML File."),
'file_save' : fields.binary('Save File', readonly=True),
'country_ids': fields.many2many('res.country', 'vat_country_rel', 'vat_id', 'country_id', 'European Countries'),
'comments': fields.text('Comments'),
}
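# Example period codes under the PPYYYY convention documented on the
# 'period_code' help text above (values are illustrative):
#   '122012' -> monthly declaration for December 2012
#   '322012' -> quarterly declaration for the 2nd trimester of 2012
#   '002012' -> declaration covering the complete fiscal year 2012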
def _get_tax_code(self, cr, uid, context=None):
obj_tax_code = self.pool.get('account.tax.code')
obj_user = self.pool.get('res.users')
company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context)
return tax_code_ids and tax_code_ids[0] or False
_defaults = {
'country_ids': _get_europe_country,
'file_save': _get_xml_data,
'name': 'vat_intra.xml',
'tax_code_id': _get_tax_code,
}
def _get_datas(self, cr, uid, ids, context=None):
"""Collects require data for vat intra xml
:param ids: id of wizard.
:return: dict of all data to be used to generate xml for Partner VAT Intra.
:rtype: dict
"""
if context is None:
context = {}
obj_user = self.pool.get('res.users')
obj_sequence = self.pool.get('ir.sequence')
obj_partner = self.pool.get('res.partner')
xmldict = {}
post_code = street = city = country = data_clientinfo = ''
seq = amount_sum = 0
wiz_data = self.browse(cr, uid, ids[0], context=context)
comments = wiz_data.comments
if wiz_data.tax_code_id:
data_company = wiz_data.tax_code_id.company_id
else:
data_company = obj_user.browse(cr, uid, uid, context=context).company_id
# Get Company vat
company_vat = data_company.partner_id.vat
if not company_vat:
raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with your company.'))
company_vat = company_vat.replace(' ','').upper()
issued_by = company_vat[:2]
if len(wiz_data.period_code) != 6:
raise osv.except_osv(_('Error!'), _('Period code is not valid.'))
if not wiz_data.period_ids:
raise osv.except_osv(_('Insufficient Data!'),_('Please select at least one Period.'))
p_id_list = obj_partner.search(cr, uid, [('vat','!=',False)], context=context)
if not p_id_list:
raise osv.except_osv(_('Insufficient Data!'),_('No partner has a VAT number associated with him.'))
seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
dnum = company_vat[2:] + seq_declarantnum[-4:]
addr = obj_partner.address_get(cr, uid, [data_company.partner_id.id], ['invoice'])
email = data_company.partner_id.email or ''
phone = data_company.partner_id.phone or ''
if addr.get('invoice',False):
ads = obj_partner.browse(cr, uid, [addr['invoice']])[0]
city = (ads.city or '')
post_code = (ads.zip or '')
if ads.street:
street = ads.street
if ads.street2:
street += ' '
street += ads.street2
if ads.country_id:
country = ads.country_id.code
if not country:
country = company_vat[:2]
if not email:
raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
if not phone:
raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
xmldict.update({
'company_name': data_company.name,
'company_vat': company_vat,
'vatnum': company_vat[2:],
'mand_id': wiz_data.mand_id,
'sender_date': str(time.strftime('%Y-%m-%d')),
'street': street,
'city': city,
'post_code': post_code,
'country': country,
'email': email,
'phone': phone.replace('/','').replace('.','').replace('(','').replace(')','').replace(' ',''),
'period': wiz_data.period_code,
'clientlist': [],
'comments': comments,
'issued_by': issued_by,
})
#tax code 44: services
#tax code 46L: normal good deliveries
#tax code 46T: ABC good deliveries
#tax code 48xxx: credite note on tax code xxx
codes = ('44', '46L', '46T', '48s44', '48s46L', '48s46T')
cr.execute('''SELECT p.name As partner_name, l.partner_id AS partner_id, p.vat AS vat,
(CASE WHEN t.code = '48s44' THEN '44'
WHEN t.code = '48s46L' THEN '46L'
WHEN t.code = '48s46T' THEN '46T'
ELSE t.code END) AS intra_code,
SUM(CASE WHEN t.code in ('48s44','48s46L','48s46T') THEN -l.tax_amount ELSE l.tax_amount END) AS amount
FROM account_move_line l
LEFT JOIN account_tax_code t ON (l.tax_code_id = t.id)
LEFT JOIN res_partner p ON (l.partner_id = p.id)
WHERE t.code IN %s
AND l.period_id IN %s
AND t.company_id = %s
GROUP BY p.name, l.partner_id, p.vat, intra_code''', (codes, tuple([p.id for p in wiz_data.period_ids]), data_company.id))
p_count = 0
for row in cr.dictfetchall():
if not row['vat']:
row['vat'] = ''
p_count += 1
seq += 1
amt = row['amount'] or 0.0
amount_sum += amt
intra_code = row['intra_code'] == '44' and 'S' or (row['intra_code'] == '46L' and 'L' or (row['intra_code'] == '46T' and 'T' or ''))
xmldict['clientlist'].append({
'partner_name': row['partner_name'],
'seq': seq,
'vatnum': row['vat'][2:].replace(' ','').upper(),
'vat': row['vat'],
'country': row['vat'][:2],
'amount': round(amt,2),
'intra_code': row['intra_code'],
'code': intra_code})
xmldict.update({'dnum': dnum, 'clientnbr': str(seq), 'amountsum': round(amount_sum,2), 'partner_wo_vat': p_count})
return xmldict
def create_xml(self, cursor, user, ids, context=None):
"""Creates xml that is to be exported and sent to estate for partner vat intra.
:return: Value for next action.
:rtype: dict
"""
mod_obj = self.pool.get('ir.model.data')
xml_data = self._get_datas(cursor, user, ids, context=context)
month_quarter = xml_data['period'][:2]
year = xml_data['period'][2:]
data_file = ''
# Can't we do this by etree?
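# (A sketch of what an etree-based version of the header could look like;
# the namespace URIs are taken from the literal template below:)
#
#   from lxml import etree
#   NS2 = 'http://www.minfin.fgov.be/IntraConsignment'
#   nsmap = {None: 'http://www.minfin.fgov.be/InputCommon', 'ns2': NS2}
#   root = etree.Element('{%s}IntraConsignment' % NS2,
#                        nsmap=nsmap, IntraListingsNbr='1')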
data_head = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:IntraConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/IntraConsignment" IntraListingsNbr="1">
<ns2:Representative>
<RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(vatnum)s</RepresentativeID>
<Name>%(company_name)s</Name>
<Street>%(street)s</Street>
<PostCode>%(post_code)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Representative>""" % (xml_data)
if xml_data['mand_id']:
data_head += '\n\t\t<ns2:RepresentativeReference>%(mand_id)s</ns2:RepresentativeReference>' % (xml_data)
data_comp_period = '\n\t\t<ns2:Declarant>\n\t\t\t<VATNumber>%(vatnum)s</VATNumber>\n\t\t\t<Name>%(company_name)s</Name>\n\t\t\t<Street>%(street)s</Street>\n\t\t\t<PostCode>%(post_code)s</PostCode>\n\t\t\t<City>%(city)s</City>\n\t\t\t<CountryCode>%(country)s</CountryCode>\n\t\t\t<EmailAddress>%(email)s</EmailAddress>\n\t\t\t<Phone>%(phone)s</Phone>\n\t\t</ns2:Declarant>' % (xml_data)
if month_quarter.startswith('3'):
data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Quarter>'+month_quarter[1]+'</ns2:Quarter> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
elif month_quarter.startswith('0') and month_quarter.endswith('0'):
data_comp_period+= '\n\t\t<ns2:Period>\n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
else:
data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Month>'+month_quarter+'</ns2:Month> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
data_clientinfo = ''
for client in xml_data['clientlist']:
if not client['vatnum']:
raise osv.except_osv(_('Insufficient Data!'),_('No vat number defined for %s.') % client['partner_name'])
data_clientinfo +='\n\t\t<ns2:IntraClient SequenceNumber="%(seq)s">\n\t\t\t<ns2:CompanyVATNumber issuedBy="%(country)s">%(vatnum)s</ns2:CompanyVATNumber>\n\t\t\t<ns2:Code>%(code)s</ns2:Code>\n\t\t\t<ns2:Amount>%(amount).2f</ns2:Amount>\n\t\t</ns2:IntraClient>' % (client)
data_decl = '\n\t<ns2:IntraListing SequenceNumber="1" ClientsNbr="%(clientnbr)s" DeclarantReference="%(dnum)s" AmountSum="%(amountsum).2f">' % (xml_data)
data_file += data_head + data_decl + data_comp_period + data_clientinfo + '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>\n\t</ns2:IntraListing>\n</ns2:IntraConsignment>' % (xml_data)
context = dict(context or {})
context['file_save'] = data_file
model_data_ids = mod_obj.search(cursor, user,[('model','=','ir.ui.view'),('name','=','view_vat_intra_save')], context=context)
resource_id = mod_obj.read(cursor, user, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Save'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'partner.vat.intra',
'views': [(resource_id,'form')],
'view_id': 'view_vat_intra_save',
'type': 'ir.actions.act_window',
'target': 'new',
}
def preview(self, cr, uid, ids, context=None):
xml_data = self._get_datas(cr, uid, ids, context=context)
datas = {
'ids': [],
'model': 'partner.vat.intra',
'form': xml_data
}
return self.pool['report'].get_action(
cr, uid, [], 'l10n_be.report_l10nvatintraprint', data=datas, context=context
)
class vat_intra_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(vat_intra_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
class wrapped_vat_intra_print(osv.AbstractModel):
_name = 'report.l10n_be.report_l10nvatintraprint'
_inherit = 'report.abstract_report'
_template = 'l10n_be.report_l10nvatintraprint'
_wrapped_report_class = vat_intra_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Jgarcia-IAS/localizacion | refs/heads/master | openerp/addons-extra/odoo-pruebas/odoo-server/addons-extra/cost_center_asset/__init__.py | 7 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import account
import account_asset |
mcdaniel67/sympy | refs/heads/master | sympy/plotting/pygletplot/plot_mode_base.py | 96 | from __future__ import print_function, division
from pyglet.gl import *
from plot_mode import PlotMode
from threading import Thread, Event, RLock
from color_scheme import ColorScheme
from sympy.core import S
from sympy.core.compatibility import is_sequence
from time import sleep
import warnings
class PlotModeBase(PlotMode):
"""
Intended parent class for plotting
modes. Provides base functionality
in conjunction with its parent,
PlotMode.
"""
##
## Class-Level Attributes
##
"""
The following attributes are meant
to be set at the class level, and serve
as parameters to the plot mode registry
(in PlotMode). See plot_modes.py for
concrete examples.
"""
"""
i_vars
'x' for Cartesian2D
'xy' for Cartesian3D
etc.
d_vars
'y' for Cartesian2D
'r' for Polar
etc.
"""
i_vars, d_vars = '', ''
"""
intervals
Default intervals for each i_var, and in the
same order. Specified [min, max, steps].
No variable can be given (it is bound later).
"""
intervals = []
"""
aliases
A list of strings which can be used to
access this mode.
'cartesian' for Cartesian2D and Cartesian3D
'polar' for Polar
'cylindrical', 'polar' for Cylindrical
Note that _init_mode chooses the first alias
in the list as the mode's primary_alias, which
will be displayed to the end user in certain
contexts.
"""
aliases = []
"""
is_default
Whether to set this mode as the default
for arguments passed to PlotMode() containing
the same number of d_vars as this mode and
at most the same number of i_vars.
"""
is_default = False
"""
All of the above attributes are defined in PlotMode.
The following ones are specific to PlotModeBase.
"""
"""
A list of the render styles. Do not modify.
"""
styles = {'wireframe': 1, 'solid': 2, 'both': 3}
"""
style_override
Always use this style if not blank.
"""
style_override = ''
"""
default_wireframe_color
default_solid_color
Can be used when color is None or being calculated.
Used by PlotCurve and PlotSurface, but not anywhere
in PlotModeBase.
"""
default_wireframe_color = (0.85, 0.85, 0.85)
default_solid_color = (0.6, 0.6, 0.9)
default_rot_preset = 'xy'
##
## Instance-Level Attributes
##
## 'Abstract' member functions
def _get_evaluator(self):
if self.use_lambda_eval:
try:
e = self._get_lambda_evaluator()
return e
except Exception:
warnings.warn("\nWarning: creating lambda evaluator failed. "
"Falling back on sympy subs evaluator.")
return self._get_sympy_evaluator()
def _get_sympy_evaluator(self):
raise NotImplementedError()
def _get_lambda_evaluator(self):
raise NotImplementedError()
def _on_calculate_verts(self):
raise NotImplementedError()
def _on_calculate_cverts(self):
raise NotImplementedError()
## Base member functions
def __init__(self, *args, **kwargs):
self.verts = []
self.cverts = []
self.bounds = [[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0]]
self.cbounds = [[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0],
[S.Infinity, -S.Infinity, 0]]
self._draw_lock = RLock()
self._calculating_verts = Event()
self._calculating_cverts = Event()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = 0.0
self._calculating_cverts_pos = 0.0
self._calculating_cverts_len = 0.0
self._max_render_stack_size = 3
self._draw_wireframe = [-1]
self._draw_solid = [-1]
self._style = None
self._color = None
self.predraw = []
self.postdraw = []
self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
self.style = self.options.pop('style', '')
self.color = self.options.pop('color', 'rainbow')
self.bounds_callback = kwargs.pop('bounds_callback', None)
self._on_calculate()
def synchronized(f):
def w(self, *args, **kwargs):
self._draw_lock.acquire()
try:
r = f(self, *args, **kwargs)
return r
finally:
self._draw_lock.release()
return w
@synchronized
def push_wireframe(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_wireframe.append(function)
if len(self._draw_wireframe) > self._max_render_stack_size:
del self._draw_wireframe[1] # leave marker element
@synchronized
def push_solid(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_solid.append(function)
if len(self._draw_solid) > self._max_render_stack_size:
del self._draw_solid[1] # leave marker element
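    # Example (illustrative only): a subclass typically pushes a callable
    # that issues raw GL commands; the base class compiles it into a
    # display list on the next draw, e.g.
    #
    #   def draw_verts():
    #       glBegin(GL_LINE_STRIP)
    #       for v in self.verts:
    #           glVertex3f(*v)
    #       glEnd()
    #   self.push_wireframe(draw_verts)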
def _create_display_list(self, function):
dl = glGenLists(1)
glNewList(dl, GL_COMPILE)
function()
glEndList()
return dl
def _render_stack_top(self, render_stack):
top = render_stack[-1]
if top == -1:
return -1 # nothing to display
elif callable(top):
dl = self._create_display_list(top)
render_stack[-1] = (dl, top)
return dl # display newly added list
elif len(top) == 2:
if GL_TRUE == glIsList(top[0]):
return top[0] # display stored list
dl = self._create_display_list(top[1])
render_stack[-1] = (dl, top[1])
return dl # display regenerated list
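    # A render stack entry is therefore one of three things: -1 (nothing
    # pushed yet), a bare callable (pushed but not yet compiled), or a
    # (display_list, callable) pair (compiled, and rebuilt from the callable
    # if GL has freed the list).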
def _draw_solid_display_list(self, dl):
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glCallList(dl)
glPopAttrib()
def _draw_wireframe_display_list(self, dl):
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glEnable(GL_POLYGON_OFFSET_LINE)
glPolygonOffset(-0.005, -50.0)
glCallList(dl)
glPopAttrib()
@synchronized
def draw(self):
for f in self.predraw:
if callable(f):
f()
if self.style_override:
style = self.styles[self.style_override]
else:
style = self.styles[self._style]
# Draw solid component if style includes solid
if style & 2:
dl = self._render_stack_top(self._draw_solid)
if dl > 0 and GL_TRUE == glIsList(dl):
self._draw_solid_display_list(dl)
# Draw wireframe component if style includes wireframe
if style & 1:
dl = self._render_stack_top(self._draw_wireframe)
if dl > 0 and GL_TRUE == glIsList(dl):
self._draw_wireframe_display_list(dl)
for f in self.postdraw:
if callable(f):
f()
def _on_change_color(self, color):
Thread(target=self._calculate_cverts).start()
def _on_calculate(self):
Thread(target=self._calculate_all).start()
def _calculate_all(self):
self._calculate_verts()
self._calculate_cverts()
def _calculate_verts(self):
if self._calculating_verts.isSet():
return
self._calculating_verts.set()
try:
self._on_calculate_verts()
finally:
self._calculating_verts.clear()
if callable(self.bounds_callback):
self.bounds_callback()
def _calculate_cverts(self):
if self._calculating_verts.isSet():
return
while self._calculating_cverts.isSet():
sleep(0) # wait for previous calculation
self._calculating_cverts.set()
try:
self._on_calculate_cverts()
finally:
self._calculating_cverts.clear()
def _get_calculating_verts(self):
return self._calculating_verts.isSet()
def _get_calculating_verts_pos(self):
return self._calculating_verts_pos
def _get_calculating_verts_len(self):
return self._calculating_verts_len
def _get_calculating_cverts(self):
return self._calculating_cverts.isSet()
def _get_calculating_cverts_pos(self):
return self._calculating_cverts_pos
def _get_calculating_cverts_len(self):
return self._calculating_cverts_len
## Property handlers
def _get_style(self):
return self._style
@synchronized
def _set_style(self, v):
if v is None:
return
if v == '':
step_max = 0
for i in self.intervals:
if i.v_steps is None:
continue
step_max = max([step_max, int(i.v_steps)])
v = ['both', 'solid'][step_max > 40]
if v not in self.styles:
raise ValueError("v should be there in self.styles")
if v == self._style:
return
self._style = v
def _get_color(self):
return self._color
@synchronized
def _set_color(self, v):
try:
if v is not None:
if is_sequence(v):
v = ColorScheme(*v)
else:
v = ColorScheme(v)
if repr(v) == repr(self._color):
return
self._on_change_color(v)
self._color = v
except Exception as e:
raise RuntimeError(("Color change failed. "
"Reason: %s" % (str(e))))
style = property(_get_style, _set_style)
color = property(_get_color, _set_color)
calculating_verts = property(_get_calculating_verts)
calculating_verts_pos = property(_get_calculating_verts_pos)
calculating_verts_len = property(_get_calculating_verts_len)
calculating_cverts = property(_get_calculating_cverts)
calculating_cverts_pos = property(_get_calculating_cverts_pos)
calculating_cverts_len = property(_get_calculating_cverts_len)
## String representations
def __str__(self):
f = ", ".join(str(d) for d in self.d_vars)
o = "'mode=%s'" % (self.primary_alias)
return ", ".join([f, o])
def __repr__(self):
f = ", ".join(str(d) for d in self.d_vars)
i = ", ".join(str(i) for i in self.intervals)
d = [('mode', self.primary_alias),
('color', str(self.color)),
('style', str(self.style))]
o = "'%s'" % (("; ".join("%s=%s" % (k, v)
for k, v in d if v != 'None')))
return ", ".join([f, i, o])
|
sung-su/crosswalk-tizen | refs/heads/master | tools/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
hradec/gaffer | refs/heads/master | python/GafferArnoldUITest/DocumentationTest.py | 8 | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferUITest
import GafferScene
import GafferSceneUI
import GafferArnold
import GafferArnoldUI
class DocumentationTest( GafferUITest.TestCase ) :
def test( self ) :
self.maxDiff = None
self.assertNodesAreDocumented(
GafferArnold,
additionalTerminalPlugTypes = ( GafferScene.ScenePlug, )
)
def testExamples( self ) :
self.assertExampleFilesExist()
self.assertExampleFilesDontReferenceUnstablePaths()
if __name__ == "__main__":
unittest.main()
|
bankonmeOS/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/pylib/gyp/MSVSNew.py | 601 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
    A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can explicitly
determine the GUID to refer to explicitly. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
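# Determinism sketch (hypothetical target names): the same name/seed pair
# always yields the same GUID, while changing either input yields a new one:
#   MakeGuid('base_unittests') == MakeGuid('base_unittests') # True
#   MakeGuid('base_unittests') == MakeGuid('base_unittests', seed='x') # False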
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution:
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
|
drusk/pml | refs/heads/master | pml/supervised/classifiers.py | 1 | # Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Classification algorithms for supervised learning tasks.
@author: drusk
"""
import pandas as pd
from pml.data import model
from pml.utils.errors import UnlabelledDataSetError, InconsistentFeaturesError
class AbstractClassifier(object):
"""
This is the base class which classification algorithms should extend. It
provides the common functionality for each classifier.
"""
def __init__(self, training_set):
"""
Constructs the classifier. Subclasses may have additional parameters
in their constructors.
Args:
training_set:
A labelled DataSet object used to train the classifier.
Raises:
UnlabelledDataSetError if the training set is not labelled.
"""
if not training_set.is_labelled():
raise UnlabelledDataSetError(custom_message=("Training set must "
"be labelled."))
self.training_set = training_set
def __repr__(self):
"""
This gets called when the object's name is typed into IPython on its
own line, causing a string representation of the object to be
displayed.
NOTE: override __str__ in subclass to set this.
Returns:
This object's string representation, primarily for debugging
purposes.
"""
return self.__str__()
def classify_all(self, dataset):
"""
Predicts the classification of each sample in a dataset.
Args:
dataset: DataSet compatible object (see DataSet constructor)
the dataset whose samples (observations) will be classified.
Returns:
A ClassifiedDataSet which contains the classification results for
each sample. It also contains the original data.
"""
dataset = model.as_dataset(dataset)
return ClassifiedDataSet(dataset, dataset.reduce_rows(self.classify))
def classify(self, sample):
"""
Predicts a sample's classification based on the training set.
Args:
sample:
the sample or observation to be classified.
Returns:
The sample's classification.
Raises:
InconsistentFeaturesError if the sample doesn't have the same
features as the training data.
"""
sample = pd.Series(sample)
self._check_feature_list(sample)
return self._classify(sample)
def _classify(self, sample):
"""
Classifiers which subclass this AbstractClassifier must implement
this method to provide the classification algorithm.
"""
raise NotImplementedError(("Classifiers must implement the "
"'_classify' method."))
def _check_feature_list(self, sample):
"""
Raises an InconsistentFeaturesError if the sample does not have
the same features as the training data.
"""
expected_features = self.training_set.feature_list()
actual_features = sample.index.tolist()
if set(expected_features) != set(actual_features):
raise InconsistentFeaturesError(expected_features,
actual_features)
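# A minimal sketch of a concrete classifier (illustrative only, not part of
# pml): it ignores the sample and always predicts the most frequent label in
# the training set.
#
#   class MajorityClassifier(AbstractClassifier):
#       def _classify(self, sample):
#           return self.training_set.get_labels().value_counts().idxmax()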
class ClassifiedDataSet(model.DataSet):
"""
A collection of data which has been analysed by a classification
algorithm. It contains both the original DataSet and the results of
the classification. It provides methods for analysing these
classification results.
"""
def __init__(self, dataset, classifications):
"""
Creates a new ClassifiedDataSet.
Args:
dataset: model.DataSet
A dataset which has been classified but does not hold the results.
classifications: pandas.Series
A Series with the classification results.
"""
super(ClassifiedDataSet, self).__init__(dataset.get_data_frame(),
dataset.get_labels())
self.classifications = classifications
def get_classifications(self):
"""
Retrieves the classifications computed for this dataset.
Returns:
A pandas Series containing each sample's classification.
"""
return self.classifications
def compute_accuracy(self):
"""
Calculates the percent accuracy of classification results.
Returns:
The percent accuracy of the classification results, i.e. the number
of samples correctly classified divided by the total number of
samples. Should be a floating point number between 0 and 1.
Raises:
UnlabelledDataSetError if the dataset is not labelled.
"""
if not self.is_labelled():
raise UnlabelledDataSetError()
correct = 0
for ind in self.classifications.index:
if self.classifications[ind] == self.labels[ind]:
correct += 1
return float(correct) / len(self.classifications)
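    # e.g. (illustrative): with labels [a, b, a, b] and classifications
    # [a, b, b, b], three of the four samples match, so the accuracy is 0.75.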
|
dpgeorge/micropython | refs/heads/master | tests/thread/mutate_set.py | 14 | # test concurrent mutating access to a shared set object
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# the shared set
se = set([-1, -2, -3, -4])
# main thread function
def th(n, lo, hi):
for repeat in range(n):
for i in range(lo, hi):
se.add(i)
assert i in se
se.remove(i)
assert i not in se
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (50, i * 500, (i + 1) * 500))
# busy wait for threads to finish
while n_finished < n_thread:
pass
# check set has correct contents
print(sorted(se))
|
PavanGupta01/aerospike-admin | refs/heads/master | lib/controller.py | 1 | # Copyright 2013-2014 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.controllerlib import *
from lib import util
import time, os, sys, platform, shutil, urllib2, socket
def flip_keys(orig_data):
new_data = {}
for key1, data1 in orig_data.iteritems():
if isinstance(data1, Exception):
continue
for key2, data2 in data1.iteritems():
if key2 not in new_data:
new_data[key2] = {}
new_data[key2][key1] = data2
return new_data
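# For example (illustrative data):
#   flip_keys({'node1': {'ns1': 1, 'ns2': 2}, 'node2': {'ns1': 3}})
#   -> {'ns1': {'node1': 1, 'node2': 3}, 'ns2': {'node1': 2}}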
@CommandHelp('Aerospike Admin')
class RootController(BaseController):
def __init__(self, seed_nodes=[('127.0.0.1',3000)]
, use_telnet=False, user=None, password=None):
super(RootController, self).__init__(seed_nodes=seed_nodes
, use_telnet=use_telnet
, user=user
, password=password)
self.controller_map = {
'info':InfoController
, 'show':ShowController
, 'asinfo':ASInfoController
, 'clinfo':ASInfoController
, 'cluster':ClusterController
, '!':ShellController
, 'shell':ShellController
, 'collectinfo':CollectinfoController
}
@CommandHelp('Terminate session')
def do_exit(self, line):
# This function is a hack for autocomplete
return "EXIT"
@CommandHelp('Returns documentation related to a command'
, 'for example, to retrieve documentation for the "info"'
, 'command use "help info".')
def do_help(self, line):
self.executeHelp(line)
@CommandHelp('"watch" Runs a command for a specified pause and iterations.'
, 'Usage: watch [pause] [iterations] [--no-diff] command]'
, ' pause: the duration between executions.'
, ' [default: 2 seconds]'
, ' iterations: Number of iterations to execute command.'
, ' [default: until keyboard interrupt]'
, ' --no-diff: Do not do diff highlighting'
, 'Example 1: Show "info network" 3 times with 1 second pause'
, ' watch 1 3 info network'
, 'Example 2: Show "info namespace" with 5 second pause until'
, ' interrupted'
, ' watch 5 info namespace')
def do_watch(self, line):
self.view.watch(self, line)
@CommandHelp('The "info" command provides summary tables for various aspects'
, 'of Aerospike functionality.')
class InfoController(CommandController):
def __init__(self):
self.modifiers = set(['with'])
@CommandHelp('Displays service, network, namespace, and xdr summary'
, 'information.')
def _do_default(self, line):
self.do_service(line)
self.do_network(line)
self.do_namespace(line)
self.do_xdr(line)
@CommandHelp('Displays summary information for the Aerospike service.')
def do_service(self, line):
stats = self.cluster.infoStatistics(nodes=self.nodes)
builds = self.cluster.info('build', nodes=self.nodes)
services = self.cluster.infoServices(nodes=self.nodes)
visible = self.cluster.getVisibility()
visibility = {}
for node_id, service_list in services.iteritems():
if isinstance(service_list, Exception):
continue
service_set = set(service_list)
if len((visible | service_set) - service_set) != 1:
visibility[node_id] = False
else:
visibility[node_id] = True
self.view.infoService(stats, builds, visibility, self.cluster, **self.mods)
@CommandHelp('Displays network information for Aerospike, the main'
, 'purpose of this information is to link node ids to'
, 'fqdn/ip addresses.')
def do_network(self, line):
stats = self.cluster.infoStatistics(nodes=self.nodes)
hosts = self.cluster.nodes
# get current time from namespace
ns_stats = self.cluster.infoAllNamespaceStatistics(nodes=self.nodes)
for host, configs in ns_stats.iteritems():
if isinstance(configs, Exception):
continue
ns = configs.keys()[0]
if ns:
# lets just add it to stats
if not isinstance(configs[ns], Exception) and \
not isinstance(stats[host], Exception):
stats[host]['current-time'] = configs[ns]['current-time']
self.view.infoNetwork(stats, hosts, self.cluster, **self.mods)
@CommandHelp('Displays summary information for each namespace.')
def do_namespace(self, line):
stats = self.cluster.infoAllNamespaceStatistics(nodes=self.nodes)
self.view.infoNamespace(stats, self.cluster, **self.mods)
@CommandHelp('Displays summary information for Cross Datacenter'
, 'Replication (XDR).')
def do_xdr(self, line):
stats = self.cluster.infoXDRStatistics(nodes=self.nodes)
builds = self.cluster.xdrInfo('build', nodes=self.nodes)
xdr_enable = self.cluster.isXDREnabled(nodes=self.nodes)
self.view.infoXDR(stats, builds, xdr_enable, self.cluster, **self.mods)
    @CommandHelp('Displays summary information for Secondary Indexes (SIndex).')
def do_sindex(self, line):
stats = self.cluster.infoSIndex(nodes=self.nodes)
sindexes = {}
for host, stat_list in stats.iteritems():
for stat in stat_list:
if not stat:
continue
indexname = stat['indexname']
if indexname not in sindexes:
sindexes[indexname] = {}
sindexes[indexname][host] = stat
self.view.infoSIndex(stats, self.cluster, **self.mods)
@CommandHelp('"asinfo" provides raw access to the info protocol.'
, ' Options:'
, ' -v <command> - The command to execute'
, ' -p <port> - The port to use.'
, ' NOTE: currently restricted to 3000 or 3004'
, ' -l - Replace semicolons ";" with newlines.')
class ASInfoController(CommandController):
def __init__(self):
self.modifiers = set(['with', 'like'])
@CommandHelp('Executes an info command.')
def _do_default(self, line):
if not line:
raise ShellException("Could not understand asinfo request, " + \
"see 'help asinfo'")
mods = self.parseModifiers(line)
line = mods['line']
like = mods['like']
nodes = self.nodes
value = None
line_sep = False
xdr = False
tline = line[:]
while tline:
word = tline.pop(0)
if word == '-v':
value = tline.pop(0)
elif word == '-l':
line_sep = True
elif word == '-p':
port = tline.pop(0)
if port == '3004': # ugly Hack
xdr = True
else:
raise ShellException(
"Do not understand '%s' in '%s'"%(word
, " ".join(line)))
value = value.translate(None, "'\"")
if xdr:
results = self.cluster.xdrInfo(value, nodes=nodes)
else:
results = self.cluster.info(value, nodes=nodes)
self.view.asinfo(results, line_sep, self.cluster, **mods)
@CommandHelp('"shell" is used to run shell commands on the local node.')
class ShellController(CommandController):
def _do_default(self, line):
command = line
out, err = util.shell_command(command)
if err:
print err
if out:
print out
@CommandHelp('"show" is used to display Aerospike Statistics and'
, 'configuration.')
class ShowController(CommandController):
def __init__(self):
self.controller_map = {
'config':ShowConfigController
, 'statistics':ShowStatisticsController
, 'latency':ShowLatencyController
, 'distribution':ShowDistributionController
}
self.modifiers = set()
def _do_default(self, line):
self.executeHelp(line)
@CommandHelp('"distribution" is used to show the distribution of object sizes'
, 'and time to live for node and a namespace.')
class ShowDistributionController(CommandController):
def __init__(self):
self.modifiers = set(['with', 'like'])
@CommandHelp('Shows the distributions of Time to Live and Object Size')
def _do_default(self, line):
self.do_time_to_live(line[:])
self.do_object_size(line[:])
def _do_distribution(self, histogram_name, title, unit):
histogram = self.cluster.infoHistogram(histogram_name, nodes=self.nodes)
histogram = flip_keys(histogram)
for namespace, host_data in histogram.iteritems():
for host_id, data in host_data.iteritems():
hist = data['data']
width = data['width']
cum_total = 0
total = sum(hist)
percentile = 0.1
result = []
for i, v in enumerate(hist):
cum_total += float(v)
if total > 0:
portion = cum_total / total
else:
portion = 0.0
while portion >= percentile:
percentile += 0.1
result.append(i+1)
if percentile > 1.0:
break
if result == []:
result = [0] * 10
data['percentiles'] = [r * width for r in result]
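                # e.g. (illustrative): hist=[5, 5], width=10 -> the
                # cumulative portion reaches 0.5 at bucket 1 and 1.0 at
                # bucket 2, giving [10, 10, 10, 10, 10, 20, 20, 20, 20, 20].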
self.view.showDistribution(title
, histogram
, unit
, histogram_name
, self.cluster
, **self.mods)
@CommandHelp('Shows the distribution of TTLs for namespaces')
def do_time_to_live(self, line):
self._do_distribution('ttl', 'TTL Distribution', 'Seconds')
@CommandHelp('Shows the distribution of Eviction TTLs for namespaces')
def do_eviction(self, line):
self._do_distribution('evict', 'Eviction Distribution', 'Seconds')
@CommandHelp('Shows the distribution of Object sizes for namespaces')
def do_object_size(self, line):
self._do_distribution('objsz', 'Object Size Distribution', 'Record Blocks')
class ShowLatencyController(CommandController):
def __init__(self):
self.modifiers = set(['with', 'like'])
@CommandHelp('Displays latency information for Aerospike cluster.')
def _do_default(self, line):
        self.modifiers.discard('like')
latency = self.cluster.infoLatency(nodes=self.nodes)
hist_latency = {}
for node_id, hist_data in latency.iteritems():
if isinstance(hist_data, Exception):
continue
for hist_name, data in hist_data.iteritems():
if hist_name not in hist_latency:
hist_latency[hist_name] = {node_id:data}
else:
hist_latency[hist_name][node_id] = data
self.view.showLatency(hist_latency, self.cluster, **self.mods)
@CommandHelp('"show config" is used to display Aerospike configuration settings')
class ShowConfigController(CommandController):
def __init__(self):
self.modifiers = set(['with', 'like'])
@CommandHelp('Displays service, network, namespace, and xdr configuration')
def _do_default(self, line):
self.do_service(line)
self.do_network(line)
self.do_namespace(line)
self.do_xdr(line)
@CommandHelp('Displays service configuration')
def do_service(self, line):
service_configs = self.cluster.infoGetConfig(nodes=self.nodes
, stanza='service')
for node in service_configs:
if isinstance(service_configs[node], Exception):
service_configs[node] = {}
else:
service_configs[node] = service_configs[node]['service']
self.view.showConfig("Service Configuration"
, service_configs
, self.cluster, **self.mods)
@CommandHelp('Displays network configuration')
def do_network(self, line):
hb_configs = self.cluster.infoGetConfig(nodes=self.nodes
, stanza='network.heartbeat')
info_configs = self.cluster.infoGetConfig(nodes=self.nodes
, stanza='network.info')
network_configs = {}
for node in hb_configs:
if isinstance(hb_configs[node], Exception):
network_configs[node] = {}
else:
network_configs[node] = hb_configs[node]['network.heartbeat']
for node in info_configs:
if isinstance(info_configs[node], Exception):
continue
else:
network_configs[node].update(info_configs[node]['network.info'])
self.view.showConfig("Network Configuration", network_configs
, self.cluster, **self.mods)
@CommandHelp('Displays namespace configuration')
def do_namespace(self, line):
namespace_configs = self.cluster.infoGetConfig(nodes=self.nodes
, stanza='namespace')
for node in namespace_configs:
if isinstance(namespace_configs[node], Exception):
namespace_configs[node] = {}
else:
namespace_configs[node] = namespace_configs[node]['namespace']
ns_configs = {}
for host, configs in namespace_configs.iteritems():
for ns, config in configs.iteritems():
if ns not in ns_configs:
ns_configs[ns] = {}
try:
ns_configs[ns][host].update(config)
except KeyError:
ns_configs[ns][host] = config
for ns, configs in ns_configs.iteritems():
self.view.showConfig("%s Namespace Configuration"%(ns)
, configs, self.cluster, **self.mods)
@CommandHelp('Displays XDR configuration')
def do_xdr(self, line):
xdr_configs = self.cluster.infoXDRGetConfig(nodes=self.nodes)
xdr_filtered = {}
for node, config in xdr_configs.iteritems():
if isinstance(config, Exception):
continue
xdr_filtered[node] = config['xdr']
self.view.showConfig("XDR Configuration", xdr_filtered, self.cluster
, **self.mods)
@CommandHelp('Displays statistics for Aerospike components.')
class ShowStatisticsController(CommandController):
def __init__(self):
self.modifiers = set(['with', 'like'])
@CommandHelp('Displays bin, set, service, namespace, and xdr statistics')
def _do_default(self, line):
self.do_bins(line)
self.do_sets(line)
self.do_service(line)
self.do_namespace(line)
self.do_xdr(line)
@CommandHelp('Displays service statistics')
def do_service(self, line):
service_stats = self.cluster.infoStatistics(nodes=self.nodes)
self.view.showStats("Service Statistics", service_stats, self.cluster
, **self.mods)
@CommandHelp('Displays namespace statistics')
def do_namespace(self, line):
namespaces = self.cluster.infoNamespaces(nodes=self.nodes)
namespaces = namespaces.values()
namespace_set = set()
for namespace in namespaces:
if isinstance(namespace, Exception):
continue
namespace_set.update(namespace)
for namespace in sorted(namespace_set):
ns_stats = self.cluster.infoNamespaceStatistics(namespace
, nodes=self.nodes)
self.view.showStats("%s Namespace Statistics"%(namespace)
, ns_stats
, self.cluster
, **self.mods)
@CommandHelp('Displays set statistics')
def do_sets(self, line):
sets = self.cluster.infoSetStatistics(nodes=self.nodes)
set_stats = {}
for host_id, key_values in sets.iteritems():
if isinstance(key_values, Exception):
continue
for key, values in key_values.iteritems():
if key not in set_stats:
set_stats[key] = {}
host_vals = set_stats[key]
if host_id not in host_vals:
host_vals[host_id] = {}
hv = host_vals[host_id]
hv.update(values)
for (namespace, set_name), stats in set_stats.iteritems():
self.view.showStats("%s %s Set Statistics"%(namespace, set_name)
, stats
, self.cluster
, **self.mods)
@CommandHelp('Displays bin statistics')
def do_bins(self, line):
bin_stats = self.cluster.infoBinStatistics(nodes=self.nodes)
new_bin_stats = {}
for node_id, bin_stat in bin_stats.iteritems():
if isinstance(bin_stat, Exception):
continue
for namespace, stats in bin_stat.iteritems():
if namespace not in new_bin_stats:
new_bin_stats[namespace] = {}
ns_stats = new_bin_stats[namespace]
if node_id not in ns_stats:
ns_stats[node_id] = {}
node_stats = ns_stats[node_id]
node_stats.update(stats)
for namespace, bin_stats in new_bin_stats.iteritems():
self.view.showStats("%s Bin Statistics"%(namespace)
, bin_stats
, self.cluster
, **self.mods)
@CommandHelp('Displays xdr statistics')
def do_xdr(self, line):
xdr_stats = self.cluster.infoXDRStatistics(nodes=self.nodes)
self.view.showStats("XDR Statistics"
, xdr_stats
, self.cluster
, **self.mods)
class ClusterController(CommandController):
def __init__(self):
self.modifiers = set(['with'])
def _do_default(self, line):
self.executeHelp(line)
def do_dun(self, line):
results = self.cluster.infoDun(self.mods['line'], nodes=self.nodes)
self.view.dun(results, self.cluster, **self.mods)
def do_undun(self, line):
results = self.cluster.infoUndun(self.mods['line'], nodes=self.nodes)
self.view.dun(results, self.cluster, **self.mods)
@CommandHelp('"collectinfo" is used to collect system stats on the local node.')
class CollectinfoController(CommandController):
def collect_local_file(self,src,dest_dir):
try:
shutil.copy2(src, dest_dir)
        except Exception as e:
            print e
        return
def collectinfo_content(self,func,parm=''):
name = ''
capture_stdout = util.capture_stdout
sep = "\n====ASCOLLECTINFO====\n"
try:
name = func.func_name
except Exception:
pass
info_line = "[INFO] Data collection for " + name +str(parm) + " in progress.."
print info_line
sep += info_line+"\n"
if func == 'shell':
o,e = util.shell_command(parm)
elif func == 'cluster':
o = self.cluster.info(parm)
else:
o = capture_stdout(func,parm)
self.write_log(sep+str(o))
return ''
def write_log(self,collectedinfo):
global aslogfile
f = open(str(aslogfile), 'a')
f.write(str(collectedinfo))
return f.close()
def get_metadata(self,response_str,prefix=''):
aws_c = ''
aws_metadata_base_url = 'http://169.254.169.254/latest/meta-data'
prefix_o = prefix
if prefix_o == '/':
prefix = ''
for rsp in response_str.split("\n"):
if rsp[-1:] == '/':
if prefix_o == '': #First level child
rsp_p = rsp.strip('/')
else:
rsp_p = rsp
self.get_metadata(rsp_p,prefix)
else:
meta_url = aws_metadata_base_url+prefix+'/'+rsp
req = urllib2.Request(meta_url)
r = urllib2.urlopen(req)
# r = requests.get(meta_url,timeout=aws_timeout)
if r.code != 404:
aws_c += rsp +'\n'+r.read() +"\n"
return aws_c
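    # Illustrative walk: a listing entry ending in '/' (e.g. 'placement/')
    # names a subtree and is recursed into, while a leaf entry such as
    # 'instance-id' is fetched directly and appended as 'instance-id\n<value>\n'.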
def get_awsdata(self,line):
aws_rsp = ''
aws_timeout = 1
socket.setdefaulttimeout(aws_timeout)
aws_metadata_base_url = 'http://169.254.169.254/latest/meta-data'
try:
req = urllib2.Request(aws_metadata_base_url)
r = urllib2.urlopen(req)
# r = requests.get(aws_metadata_base_url,timeout=aws_timeout)
if r.code == 200:
print "This is in AWS"
rsp = r.read()
aws_rsp += self.get_metadata(rsp,'/')
print aws_rsp
else:
aws_rsp = "Not in AWS"
print aws_rsp
except Exception as e:
print e
print "Not in AWS"
def collect_sys(self,line):
lsof_cmd='sudo lsof|grep `sudo ps aux|grep -v grep|grep -E \'asd|cld\'|awk \'{print $2}\'` 2>/dev/null'
print util.shell_command([lsof_cmd])
print platform.platform()
smd_home = '/opt/aerospike/smd'
if os.path.isdir(smd_home):
smd_files = [ f for f in os.listdir(smd_home) if os.path.isfile(os.path.join(smd_home, f)) ]
for f in smd_files:
smd_f = os.path.join(smd_home, f)
print smd_f
smd_fp = open(smd_f, 'r')
print smd_fp.read()
smd_fp.close()
def archive_log(self,logdir):
util.shell_command(["tar -czvf " + logdir + ".tgz " + aslogdir])
sys.stderr.write("\x1b[2J\x1b[H")
print "\n\n\nFiles in " + logdir + " and " + logdir + ".tgz saved. "
print "END OF ASCOLLECTINFO"
def main_collectinfo(self, line):
collect_output = time.strftime("%Y-%m-%d %H:%M:%S UTC\n", time.gmtime())
info_params = ['network','service', 'namespace', 'xdr', 'sindex']
show_params = ['config', 'distribution', 'latency', 'statistics']
cluster_params = ['service',
'services',
'xdr-min-lastshipinfo:',
'dump-fabric:',
'dump-hb:',
'dump-migrates:',
'dump-msgs:',
'dump-paxos:',
'dump-smd:',
'dump-wb:',
'dump-wb-summary:',
'dump-wr:',
'sindex-dump:'
]
shell_cmds = ['date',
'hostname',
'ifconfig',
'uptime',
'uname -a',
'lsb_release -a',
'ls /etc|grep release|xargs -I f cat /etc/f',
'rpm -qa|grep -E "citrus|aero"',
'dpkg -l|grep -E "citrus|aero"',
'tail -n 10000 /var/log/aerospike/*.log',
'tail -n 10000 /var/log/citrusleaf.log',
'tail -n 10000 /var/log/*xdr.log',
'netstat -pant|grep 3000',
'top -n3 -b',
'free -m',
'df -h',
'ls /sys/block/{sd*,xvd*}/queue/rotational |xargs -I f sh -c "echo f; cat f;"',
'ls /sys/block/{sd*,xvd*}/device/model |xargs -I f sh -c "echo f; cat f;"',
'lsof',
'dmesg',
'iostat -x 1 10',
'vmstat -s',
'vmstat -m',
'iptables -L',
'cat /etc/aerospike/aerospike.conf',
'cat /etc/citrusleaf/citrusleaf.conf',
]
terminal.enable_color(False)
global aslogdir,aslogfile
aslogdir = '/tmp/as_log_' + str(time.time())
aslogfile = aslogdir + '/ascollectinfo.log'
os.mkdir(aslogdir)
self.write_log(collect_output)
log_location = '/var/log/aerospike/*.log'
try:
log_location = '/var/log/aerospike/aerospike.log'
cinfo = InfoController()
for info_param in info_params:
self.collectinfo_content(cinfo,[info_param])
do_show = ShowController()
for show_param in show_params:
self.collectinfo_content(do_show,[show_param])
for cluster_param in cluster_params:
self.collectinfo_content('cluster',cluster_param)
# Below is not optimum, we should query only localhost
logs = self.cluster.info('logs')
for i in logs:
logs_c = logs[i].split(';')
for log in logs_c:
log_location = log.split(':')[1]
cmd = 'tail -n 10000 ' + log_location
if log_location != '/var/log/aerospike/aerospike.log':
self.collectinfo_content('shell',[cmd])
self.collect_local_file(log_location,aslogdir)
except Exception as e:
self.write_log(str(e))
sys.stdout = sys.__stdout__
for cmd in shell_cmds:
self.collectinfo_content('shell',[cmd])
self.collectinfo_content(self.collect_sys)
self.collectinfo_content(self.get_awsdata)
self.archive_log(aslogdir)
def _do_default(self, line):
self.main_collectinfo(line)
|
blooparksystems/odoo | refs/heads/9.0 | addons/hw_scanner/controllers/main.py | 158 | # -*- coding: utf-8 -*-
import logging
import os
import time
from os import listdir
from os.path import join
from threading import Thread, Lock
from select import select
from Queue import Queue, Empty
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
try:
import evdev
except ImportError:
_logger.error('Odoo module hw_scanner depends on the evdev python module')
evdev = None
class Scanner(Thread):
def __init__(self):
Thread.__init__(self)
self.lock = Lock()
self.status = {'status':'connecting', 'messages':[]}
self.input_dir = '/dev/input/by-id/'
self.barcodes = Queue()
self.keymap = {
2: ("1","!"),
3: ("2","@"),
4: ("3","#"),
5: ("4","$"),
6: ("5","%"),
7: ("6","^"),
8: ("7","&"),
9: ("8","*"),
10:("9","("),
11:("0",")"),
12:("-","_"),
13:("=","+"),
# 14 BACKSPACE
# 15 TAB
16:("q","Q"),
17:("w","W"),
18:("e","E"),
19:("r","R"),
20:("t","T"),
21:("y","Y"),
22:("u","U"),
23:("i","I"),
24:("o","O"),
25:("p","P"),
26:("[","{"),
27:("]","}"),
# 28 ENTER
# 29 LEFT_CTRL
30:("a","A"),
31:("s","S"),
32:("d","D"),
33:("f","F"),
34:("g","G"),
35:("h","H"),
36:("j","J"),
37:("k","K"),
38:("l","L"),
39:(";",":"),
40:("'","\""),
41:("`","~"),
# 42 LEFT SHIFT
43:("\\","|"),
44:("z","Z"),
45:("x","X"),
46:("c","C"),
47:("v","V"),
48:("b","B"),
49:("n","N"),
50:("m","M"),
51:(",","<"),
52:(".",">"),
53:("/","?"),
# 54 RIGHT SHIFT
57:(" "," "),
}
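        # Decoding example (illustrative): keydown codes 2 and 3 map to "1"
        # and "2" via the keymap, and code 28 (ENTER) terminates the scan,
        # so the barcode "12" is queued.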
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def set_status(self, status, message = None):
if status == self.status['status']:
if message != None and message != self.status['messages'][-1]:
self.status['messages'].append(message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('Barcode Scanner Error: '+message)
elif status == 'disconnected' and message:
_logger.info('Disconnected Barcode Scanner: %s', message)
def get_device(self):
try:
if not evdev:
return None
devices = [ device for device in listdir(self.input_dir)]
keyboards = [ device for device in devices if ('kbd' in device) and ('keyboard' not in device.lower())]
scanners = [ device for device in devices if ('barcode' in device.lower()) or ('scanner' in device.lower())]
if len(scanners) > 0:
self.set_status('connected','Connected to '+scanners[0])
return evdev.InputDevice(join(self.input_dir,scanners[0]))
elif len(keyboards) > 0:
self.set_status('connected','Connected to '+keyboards[0])
return evdev.InputDevice(join(self.input_dir,keyboards[0]))
else:
self.set_status('disconnected','Barcode Scanner Not Found')
return None
except Exception as e:
self.set_status('error',str(e))
return None
def get_barcode(self):
""" Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
            return barcodes scanned in the past if they are not older than 5 seconds and have not
            been returned before. This is necessary to catch barcodes scanned while the POS is
            busy reading another barcode.
"""
self.lockedstart()
while True:
try:
timestamp, barcode = self.barcodes.get(True, 5)
if timestamp > time.time() - 5:
return barcode
except Empty:
return ''
def get_status(self):
self.lockedstart()
return self.status
def run(self):
""" This will start a loop that catches all keyboard events, parse barcode
sequences and put them on a timestamped queue that can be consumed by
the point of sale's requests for barcode events
"""
self.barcodes = Queue()
barcode = []
shift = False
device = None
while True: # barcodes loop
if device: # ungrab device between barcodes and timeouts for plug & play
try:
device.ungrab()
except Exception as e:
device = None
self.set_status('error',str(e))
else:
time.sleep(5) # wait until a suitable device is plugged
device = self.get_device()
if not device:
continue
try:
device.grab()
shift = False
barcode = []
while True: # keycode loop
r,w,x = select([device],[],[],5)
if len(r) == 0: # timeout
break
events = device.read()
for event in events:
if event.type == evdev.ecodes.EV_KEY:
#_logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
if event.value == 1: # keydown events
if event.code in self.keymap:
if shift:
barcode.append(self.keymap[event.code][1])
else:
barcode.append(self.keymap[event.code][0])
elif event.code == 42 or event.code == 54: # SHIFT
shift = True
elif event.code == 28: # ENTER, end of barcode
self.barcodes.put( (time.time(),''.join(barcode)) )
barcode = []
elif event.value == 0: #keyup events
if event.code == 42 or event.code == 54: # LEFT SHIFT
shift = False
except Exception as e:
self.set_status('error',str(e))
scanner_thread = None
if evdev:
scanner_thread = Scanner()
hw_proxy.drivers['scanner'] = scanner_thread
class ScannerDriver(hw_proxy.Proxy):
@http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
def scanner(self):
return scanner_thread.get_barcode() if scanner_thread else None
|
CalebSLane/openelisglobal-core | refs/heads/develop | liquibase/OE2.9/testCatalogHT_LNSP/scripts/runAll.py | 44 | #! /usr/bin/python
import os
__author__="paulsc"
__date__ ="$May 29, 2012 1:17:54 PM$"
if __name__ == "__main__":
print("dictionary.py")
os.system( "python dictionary.py" )
print("\npanels.py")
os.system( "python panels.py" )
print("\nresultLimits.py")
os.system( "python resultLimits.py" )
print("\nsampleType.py")
os.system( "python sampleType.py" )
print("\nsampleTypePanel.py")
os.system( "python sampleTypePanel.py" )
print("\nsampleTypeTest.py")
os.system( "python sampleTypeTest.py" )
print("\nsortSections .py")
os.system( "python sortSections.py" )
print("\ntestEntry.py")
os.system( "python testEntry.py" )
print("\ntestPanel.py")
os.system( "python testPanel.py" )
print("\ntestResult.py")
os.system( "python testResult.py" )
print("\nuom.py")
os.system( "python uom.py" )
print("\ntestCodes.py")
os.system( "python testCodes.py" )
print("\norderType.py")
os.system( "python orderType.py" )
|
asimshankar/tensorflow | refs/heads/master | tensorflow/python/data/util/random_seed_test.py | 41 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import random_seed as data_random_seed
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class RandomSeedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testRandomSeed(self):
zero_t = constant_op.constant(0, dtype=dtypes.int64, name='zero')
one_t = constant_op.constant(1, dtype=dtypes.int64, name='one')
intmax_t = constant_op.constant(
2**31 - 1, dtype=dtypes.int64, name='intmax')
test_cases = [
# Each test case is a tuple with input to get_seed:
# (input_graph_seed, input_op_seed)
# and output from get_seed:
# (output_graph_seed, output_op_seed)
((None, None), (0, 0)),
((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
((1, 1), (1, 1)),
((0, 0), (0, 2**31 - 1)), # Avoid nondeterministic (0, 0) output
((2**31 - 1, 0), (0, 2**31 - 1)), # Don't wrap to (0, 0) either
((0, 2**31 - 1), (0, 2**31 - 1)), # Wrapping for the other argument
# Once more, with tensor-valued arguments
((None, one_t), (random_seed.DEFAULT_GRAPH_SEED, 1)),
((1, one_t), (1, 1)),
((0, zero_t), (0, 2**31 - 1)), # Avoid nondeterministic (0, 0) output
((2**31 - 1, zero_t), (0, 2**31 - 1)), # Don't wrap to (0, 0) either
((0, intmax_t), (0, 2**31 - 1)), # Wrapping for the other argument
]
for tc in test_cases:
tinput, toutput = tc[0], tc[1]
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = data_random_seed.get_seed(tinput[1])
g_seed = self.evaluate(g_seed)
op_seed = self.evaluate(op_seed)
msg = 'test_case = {0}, got {1}, want {2}'.format(
tinput, (g_seed, op_seed), toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
random_seed.set_random_seed(None)
if not context.executing_eagerly():
random_seed.set_random_seed(1)
tinput = (1, None)
toutput = (1, ops.get_default_graph()._last_id) # pylint: disable=protected-access
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = data_random_seed.get_seed(tinput[1])
g_seed = self.evaluate(g_seed)
op_seed = self.evaluate(op_seed)
msg = 'test_case = {0}, got {1}, want {2}'.format(1, (g_seed, op_seed),
toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
random_seed.set_random_seed(None)
if __name__ == '__main__':
test.main()
|
wooga/airflow | refs/heads/master | airflow/providers/google/cloud/operators/gcs_to_bigquery.py | 1 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Storage to BigQuery operator.
"""
import json
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
# pylint: disable=too-many-instance-attributes
class GCSToBigQueryOperator(BaseOperator):
"""
Loads files from Google Cloud Storage into BigQuery.
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google Cloud Storage object name. The object in
Google Cloud Storage must be a JSON file with the schema fields in it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToBigQueryOperator`
:param bucket: The bucket to load from. (templated)
:type bucket: str
:param source_objects: List of Google Cloud Storage URIs to load from. (templated)
If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
:type source_objects: list[str]
:param destination_project_dataset_table: The dotted
``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to load data into.
If ``<project>`` is not included, project will be the project defined in
the connection json. (templated)
:type destination_project_dataset_table: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Should not be set when source_format is 'DATASTORE_BACKUP'.
Parameter must be defined if 'schema_object' is null and autodetect is False.
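        **Example** (illustrative; field names and types are placeholders): ::

            schema_fields = [
                {'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
                {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
            ]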
:type schema_fields: list
:param schema_object: If set, a GCS object path pointing to a .json file that
contains the schema for the table. (templated)
Parameter must be defined if 'schema_fields' is null and autodetect is False.
:type schema_object: str
    :param source_format: File format of the source data to load.
:type source_format: str
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result. Only applicable to CSV, ignored
for other formats.
:type allow_jagged_rows: bool
:param encoding: The character encoding of the data. See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).csvOptions.encoding
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:param max_id_key: If set, the name of a column in the BigQuery table
that's to be loaded. This will be used to select the MAX value from
BigQuery after the load occurs. The results will be returned by the
execute() command, which in turn gets stored in XCom for future
        operators to use. This can be helpful with incremental loads: during
        future executions, you can pick up from the max ID.
:type max_id_key: str
:param bigquery_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform and
interact with the BigQuery service.
:type bigquery_conn_id: str
:param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
Platform and interact with the Google Cloud Storage service.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: str
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: list
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param external_table: Flag to specify if the destination table should be
a BigQuery external table. Default Value is False.
:type external_table: bool
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
Note that 'field' is not available in concurrency with
dataset.table$partition.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. This is only available in conjunction with
time_partitioning. The order of columns given determines the sort order.
Not applicable for external tables.
:type cluster_fields: list[str]
:param autodetect: [Optional] Indicates if we should automatically infer the
options and schema for CSV and JSON sources. (Default: ``True``).
        Parameter must be set to True if 'schema_fields' and 'schema_object' are undefined.
        It is suggested to set it to True if the table is created outside of Airflow.
:type autodetect: bool
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
:param location: [Optional] The geographic location of the job. Required except for US and EU.
See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
"""
template_fields = ('bucket', 'source_objects',
'schema_object', 'destination_project_dataset_table')
template_ext = ('.sql',)
ui_color = '#f0eee4'
# pylint: disable=too-many-locals,too-many-arguments
@apply_defaults
def __init__(self,
bucket,
source_objects,
destination_project_dataset_table,
schema_fields=None,
schema_object=None,
source_format='CSV',
compression='NONE',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
encoding="UTF-8",
max_id_key=None,
bigquery_conn_id='google_cloud_default',
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
schema_update_options=(),
src_fmt_configs=None,
external_table=False,
time_partitioning=None,
cluster_fields=None,
autodetect=True,
encryption_configuration=None,
location=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
# GCS config
if src_fmt_configs is None:
src_fmt_configs = {}
if time_partitioning is None:
time_partitioning = {}
self.bucket = bucket
self.source_objects = source_objects
self.schema_object = schema_object
# BQ config
self.destination_project_dataset_table = destination_project_dataset_table
self.schema_fields = schema_fields
self.source_format = source_format
self.compression = compression
self.create_disposition = create_disposition
self.skip_leading_rows = skip_leading_rows
self.write_disposition = write_disposition
self.field_delimiter = field_delimiter
self.max_bad_records = max_bad_records
self.quote_character = quote_character
self.ignore_unknown_values = ignore_unknown_values
self.allow_quoted_newlines = allow_quoted_newlines
self.allow_jagged_rows = allow_jagged_rows
self.external_table = external_table
self.encoding = encoding
self.max_id_key = max_id_key
self.bigquery_conn_id = bigquery_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
self.schema_update_options = schema_update_options
self.src_fmt_configs = src_fmt_configs
self.time_partitioning = time_partitioning
self.cluster_fields = cluster_fields
self.autodetect = autodetect
self.encryption_configuration = encryption_configuration
self.location = location
def execute(self, context):
bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to,
location=self.location)
if not self.schema_fields:
if self.schema_object and self.source_format != 'DATASTORE_BACKUP':
gcs_hook = GCSHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
schema_fields = json.loads(gcs_hook.download(
self.bucket,
self.schema_object).decode("utf-8"))
elif self.schema_object is None and self.autodetect is False:
raise AirflowException('At least one of `schema_fields`, '
'`schema_object`, or `autodetect` must be passed.')
else:
schema_fields = None
else:
schema_fields = self.schema_fields
source_uris = ['gs://{}/{}'.format(self.bucket, source_object)
for source_object in self.source_objects]
conn = bq_hook.get_conn()
cursor = conn.cursor()
if self.external_table:
cursor.create_external_table(
external_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
compression=self.compression,
skip_leading_rows=self.skip_leading_rows,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
ignore_unknown_values=self.ignore_unknown_values,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
encoding=self.encoding,
src_fmt_configs=self.src_fmt_configs,
encryption_configuration=self.encryption_configuration
)
else:
cursor.run_load(
destination_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
autodetect=self.autodetect,
create_disposition=self.create_disposition,
skip_leading_rows=self.skip_leading_rows,
write_disposition=self.write_disposition,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
ignore_unknown_values=self.ignore_unknown_values,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
encoding=self.encoding,
schema_update_options=self.schema_update_options,
src_fmt_configs=self.src_fmt_configs,
time_partitioning=self.time_partitioning,
cluster_fields=self.cluster_fields,
encryption_configuration=self.encryption_configuration)
if cursor.use_legacy_sql:
escaped_table_name = f'[{self.destination_project_dataset_table}]'
else:
escaped_table_name = f'`{self.destination_project_dataset_table}`'
if self.max_id_key:
cursor.execute('SELECT MAX({}) FROM {}'.format(
self.max_id_key,
escaped_table_name))
row = cursor.fetchone()
max_id = row[0] if row[0] else 0
self.log.info(
'Loaded BQ data with max %s.%s=%s',
self.destination_project_dataset_table, self.max_id_key, max_id
)
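
def _example_usage():
    # Hypothetical usage sketch (not part of this module): the task id,
    # bucket, object pattern and destination table are placeholders.
    return GCSToBigQueryOperator(
        task_id='gcs_to_bq_example',
        bucket='example-bucket',
        source_objects=['data/sales-*.csv'],
        destination_project_dataset_table='example-project.sales_ds.sales',
        source_format='CSV',
        skip_leading_rows=1,
        write_disposition='WRITE_TRUNCATE',
        autodetect=True,
    )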
|
e-democracy/edemsignups | refs/heads/master | lib/gdata/tlslite/utils/PyCrypto_RC4.py | 359 | """PyCrypto RC4 implementation."""
from cryptomath import *
from RC4 import *
if pycryptoLoaded:
import Crypto.Cipher.ARC4
def new(key):
return PyCrypto_RC4(key)
class PyCrypto_RC4(RC4):
def __init__(self, key):
RC4.__init__(self, key, "pycrypto")
self.context = Crypto.Cipher.ARC4.new(key)
def encrypt(self, plaintext):
return self.context.encrypt(plaintext)
def decrypt(self, ciphertext):
        return self.context.decrypt(ciphertext)
|
eeshangarg/oh-mainline | refs/heads/master | vendor/packages/scrapy/scrapy/cmdline.py | 16 | from __future__ import with_statement
import sys
import os
import optparse
import cProfile
import inspect
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.xlib import lsprofcalltree
from scrapy.conf import settings
from scrapy.command import ScrapyCommand
from scrapy.exceptions import UsageError, ScrapyDeprecationWarning
from scrapy.utils.misc import walk_modules
from scrapy.utils.project import inside_project
def _iter_command_classes(module_name):
    # TODO: add `name` attribute to commands and merge this function with
# scrapy.utils.spider.iter_spider_classes
for module in walk_modules(module_name):
for obj in vars(module).itervalues():
if inspect.isclass(obj) and \
issubclass(obj, ScrapyCommand) and \
obj.__module__ == module.__name__:
yield obj
def _get_commands_from_module(module, inproject):
d = {}
for cmd in _iter_command_classes(module):
if inproject or not cmd.requires_project:
cmdname = cmd.__module__.split('.')[-1]
d[cmdname] = cmd()
return d
def _get_commands_dict(inproject):
cmds = _get_commands_from_module('scrapy.commands', inproject)
cmds_module = settings['COMMANDS_MODULE']
if cmds_module:
cmds.update(_get_commands_from_module(cmds_module, inproject))
return cmds
def _pop_command_name(argv):
i = 0
for arg in argv[1:]:
if not arg.startswith('-'):
del argv[i]
return arg
i += 1
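
def _pop_command_name_demo():
    # Sketch (hypothetical, not part of the module): with the usual layout
    # where the command directly follows the program name, the command is
    # returned and one leading element dropped, leaving argv[1:] for
    # option parsing.
    argv = ['scrapy', 'crawl', 'myspider']
    cmd = _pop_command_name(argv)
    return cmd, argv   # -> ('crawl', ['crawl', 'myspider'])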
def _print_header(inproject):
if inproject:
print "Scrapy %s - project: %s\n" % (scrapy.__version__, \
settings['BOT_NAME'])
else:
print "Scrapy %s - no active project\n" % scrapy.__version__
def _print_commands(inproject):
_print_header(inproject)
print "Usage:"
print " scrapy <command> [options] [args]\n"
print "Available commands:"
cmds = _get_commands_dict(inproject)
for cmdname, cmdclass in sorted(cmds.iteritems()):
print " %-13s %s" % (cmdname, cmdclass.short_desc())
print
print 'Use "scrapy <command> -h" to see more info about a command'
def _print_unknown_command(cmdname, inproject):
_print_header(inproject)
print "Unknown command: %s\n" % cmdname
print 'Use "scrapy" to see available commands'
if not inproject:
print
print "More commands are available in project mode"
def _check_deprecated_scrapy_ctl(argv, inproject):
"""Check if Scrapy was called using the deprecated scrapy-ctl command and
warn in that case, also creating a scrapy.cfg if it doesn't exist.
"""
if not any('scrapy-ctl' in x for x in argv):
return
import warnings
warnings.warn("`scrapy-ctl.py` command-line tool is deprecated and will be removed in Scrapy 0.11, use `scrapy` instead",
ScrapyDeprecationWarning, stacklevel=3)
if inproject:
projpath = os.path.abspath(os.path.dirname(os.path.dirname(settings.settings_module.__file__)))
cfg_path = os.path.join(projpath, 'scrapy.cfg')
if not os.path.exists(cfg_path):
with open(cfg_path, 'w') as f:
f.write("# generated automatically - feel free to edit" + os.linesep)
f.write("[settings]" + os.linesep)
f.write("default = %s" % settings.settings_module.__name__ + os.linesep)
def _run_print_help(parser, func, *a, **kw):
try:
func(*a, **kw)
except UsageError, e:
if str(e):
parser.error(str(e))
if e.print_help:
parser.print_help()
sys.exit(2)
def execute(argv=None):
if argv is None:
argv = sys.argv
crawler = CrawlerProcess(settings)
crawler.install()
inproject = inside_project()
_check_deprecated_scrapy_ctl(argv, inproject) # TODO: remove for Scrapy 0.11
cmds = _get_commands_dict(inproject)
cmdname = _pop_command_name(argv)
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), \
conflict_handler='resolve')
if not cmdname:
_print_commands(inproject)
sys.exit(0)
elif cmdname not in cmds:
_print_unknown_command(cmdname, inproject)
sys.exit(2)
cmd = cmds[cmdname]
parser.usage = "scrapy %s %s" % (cmdname, cmd.syntax())
parser.description = cmd.long_desc()
settings.defaults.update(cmd.default_settings)
cmd.settings = settings
cmd.add_options(parser)
opts, args = parser.parse_args(args=argv[1:])
_run_print_help(parser, cmd.process_options, args, opts)
cmd.set_crawler(crawler)
_run_print_help(parser, _run_command, cmd, args, opts)
sys.exit(cmd.exitcode)
def _run_command(cmd, args, opts):
if opts.profile or opts.lsprof:
_run_command_profiled(cmd, args, opts)
else:
cmd.run(args, opts)
def _run_command_profiled(cmd, args, opts):
if opts.profile:
sys.stderr.write("scrapy: writing cProfile stats to %r\n" % opts.profile)
if opts.lsprof:
sys.stderr.write("scrapy: writing lsprof stats to %r\n" % opts.lsprof)
loc = locals()
p = cProfile.Profile()
p.runctx('cmd.run(args, opts)', globals(), loc)
if opts.profile:
p.dump_stats(opts.profile)
k = lsprofcalltree.KCacheGrind(p)
if opts.lsprof:
with open(opts.lsprof, 'w') as f:
k.output(f)
if __name__ == '__main__':
execute()
|
dcsquared13/Diamond | refs/heads/master | src/collectors/diskusage/test/testdiskusage.py | 31 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from diskusage import DiskUsageCollector
##########################################################################
class TestDiskUsageCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('DiskUsageCollector', {
'interval': 10,
'sector_size': '512',
'byte_unit': 'kilobyte'
})
self.collector = DiskUsageCollector(config, None)
def test_config(self):
self.assertFalse(self.collector.config['send_zero'])
def test_import(self):
self.assertTrue(DiskUsageCollector)
@patch('os.access', Mock(return_value=True))
def test_get_disk_statistics(self):
patch_open = patch(
'__builtin__.open',
Mock(return_value=self.getFixture('diskstats')))
open_mock = patch_open.start()
result = self.collector.get_disk_statistics()
patch_open.stop()
open_mock.assert_called_once_with('/proc/diskstats')
self.assertEqual(
sorted(result.keys()),
[(8, 0), (8, 1), (8, 16), (8, 17), (8, 32),
(8, 33), (8, 48), (8, 49), (9, 0)])
return result
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture('proc_diskstats_1')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture('proc_diskstats_2')))
patch_time = patch('time.time', Mock(return_value=20))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults('test_should_work_with_real_data.pkl')
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_verify_supporting_vda_and_xvdb(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_1_vda_xvdb')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_2_vda_xvdb')))
patch_time = patch('time.time', Mock(return_value=20))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults(
'test_verify_supporting_vda_and_xvdb.pkl')
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_verify_supporting_md_dm(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_1_md_dm')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_2_md_dm')))
patch_time = patch('time.time', Mock(return_value=20))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults('test_verify_supporting_md_dm.pkl')
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_verify_supporting_disk(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_1_disk')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_2_disk')))
patch_time = patch('time.time', Mock(return_value=20))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults('test_verify_supporting_disk.pkl')
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_service_Time(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_1_service_time')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_2_service_time')))
patch_time = patch('time.time', Mock(return_value=70))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults('test_service_Time.pkl')
self.assertPublishedMany(publish_mock, metrics)
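
# Possible refactor sketch (hypothetical, not part of these tests): the
# repeated start/stop of the open/time patches could be wrapped in a single
# helper that uses patch as a context manager.
def _collect_with_fixture(collector, fixture, now):
    with patch('__builtin__.open', Mock(return_value=fixture)):
        with patch('time.time', Mock(return_value=now)):
            collector.collect()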
##########################################################################
if __name__ == "__main__":
unittest.main()
|
yugangw-msft/azure-cli | refs/heads/dev | src/azure-cli/azure/cli/command_modules/dms/tests/latest/__init__.py | 1233 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
|
absperf/wagtailapproval | refs/heads/master | tests/app/migrations/__init__.py | 12133432 | |
arbrandes/edx-platform | refs/heads/master | openedx/core/djangoapps/crawlers/tests/__init__.py | 12133432 | |
endorphinl/horizon | refs/heads/master | openstack_dashboard/dashboards/project/routers/ports/__init__.py | 12133432 | |
tedi3231/openerp | refs/heads/master | build/lib/openerp/addons/document_webdav/cache.py | 67 | import time
import heapq
def memoize(maxsize):
"""decorator to 'memoize' a function - caching its results"""
def decorating_function(f):
        cache = {}   # map from key to value
        heap = []    # heap of (timestamp, key) pairs, oldest entry first
        def wrapper(*args):
            key = repr(args)
            # local aliases for speed
            _cache = cache
            _heap = heap
            _heappop = heapq.heappop
            _heappush = heapq.heappush
            _time = time.time
            if key not in _cache:
                if len(_cache) >= maxsize:
                    # cache full: evict the oldest entry (timestamps are not
                    # refreshed on hits, so eviction is FIFO, not strict LRU)
                    (_, oldkey) = _heappop(_heap)
                    _cache.pop(oldkey)
                # insert this element
                _cache[key] = f(*args)
                _heappush(_heap, (_time(), key))
                wrapper.misses += 1
            else:
                wrapper.hits += 1
            return _cache[key]
wrapper.__doc__ = f.__doc__
wrapper.__name__ = f.__name__
wrapper.hits = wrapper.misses = 0
return wrapper
return decorating_function
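
def _memoize_demo():
    # Hypothetical usage sketch (not part of the original module).
    @memoize(maxsize=2)
    def square(x):
        return x * x
    square(3)
    square(3)
    return square.hits, square.misses   # -> (1, 1)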
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tamsky/cloud-init | refs/heads/master | cloudinit/util.py | 3 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import copy as obj_copy
import ctypes
import email
import errno
import glob
import grp
import gzip
import hashlib
import json
import os
import os.path
import platform
import pwd
import random
import re
import shutil
import socket
import stat
import string
import subprocess
import sys
import tempfile
import time
from base64 import b64decode, b64encode
from six.moves.urllib import parse as urlparse
import six
import yaml
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import mergers
from cloudinit import safeyaml
from cloudinit import type_utils
from cloudinit import url_helper
from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
os.sep: '_',
}
FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
TRUE_STRINGS = ('true', '1', 'on', 'yes')
FALSE_STRINGS = ('off', '0', 'no', 'false')
# Helper utils to see if running in a container
CONTAINER_TESTS = ('running-in-container', 'lxc-is-container')
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
if isinstance(blob, six.text_type):
return blob
return blob.decode(encoding)
def encode_text(text, encoding='utf-8'):
# Converts a text string into a binary type using given encoding.
if isinstance(text, six.binary_type):
return text
return text.encode(encoding)
def b64d(source):
# Base64 decode some data, accepting bytes or unicode/str, and returning
# str/unicode if the result is utf-8 compatible, otherwise returning bytes.
decoded = b64decode(source)
try:
return decoded.decode('utf-8')
except UnicodeDecodeError:
return decoded
def b64e(source):
# Base64 encode some data, accepting bytes or unicode/str, and returning
# str/unicode if the result is utf-8 compatible, otherwise returning bytes.
if not isinstance(source, bytes):
source = source.encode('utf-8')
return b64encode(source).decode('utf-8')
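
def _b64_demo():
    # Illustrative sketch (not part of the module): text round-trips as
    # text, while payloads that are not valid utf-8 come back as bytes.
    assert b64d(b64e('hi')) == 'hi'
    return b64d(b64encode(b'\xff\x00'))   # bytes, since not utf-8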
def fully_decoded_payload(part):
# In Python 3, decoding the payload will ironically hand us a bytes object.
# 'decode' means to decode according to Content-Transfer-Encoding, not
# according to any charset in the Content-Type. So, if we end up with
# bytes, first try to decode to str via CT charset, and failing that, try
# utf-8 using surrogate escapes.
cte_payload = part.get_payload(decode=True)
if (six.PY3 and
part.get_content_maintype() == 'text' and
isinstance(cte_payload, bytes)):
charset = part.get_charset()
if charset and charset.input_codec:
encoding = charset.input_codec
else:
encoding = 'utf-8'
return cte_payload.decode(encoding, errors='surrogateescape')
return cte_payload
# Path for DMI Data
DMI_SYS_PATH = "/sys/class/dmi/id"
# dmidecode and /sys/class/dmi/id/* use different names for the same value,
# this allows us to refer to them by one canonical name
DMIDECODE_TO_DMI_SYS_MAPPING = {
'baseboard-asset-tag': 'board_asset_tag',
'baseboard-manufacturer': 'board_vendor',
'baseboard-product-name': 'board_name',
'baseboard-serial-number': 'board_serial',
'baseboard-version': 'board_version',
'bios-release-date': 'bios_date',
'bios-vendor': 'bios_vendor',
'bios-version': 'bios_version',
'chassis-asset-tag': 'chassis_asset_tag',
'chassis-manufacturer': 'chassis_vendor',
'chassis-serial-number': 'chassis_serial',
'chassis-version': 'chassis_version',
'system-manufacturer': 'sys_vendor',
'system-product-name': 'product_name',
'system-serial-number': 'product_serial',
'system-uuid': 'product_uuid',
'system-version': 'product_version',
}
class ProcessExecutionError(IOError):
MESSAGE_TMPL = ('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Reason: %(reason)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r')
def __init__(self, stdout=None, stderr=None,
exit_code=None, cmd=None,
description=None, reason=None):
if not cmd:
self.cmd = '-'
else:
self.cmd = cmd
if not description:
self.description = 'Unexpected error while running command.'
else:
self.description = description
if not isinstance(exit_code, six.integer_types):
self.exit_code = '-'
else:
self.exit_code = exit_code
if not stderr:
self.stderr = ''
else:
self.stderr = stderr
if not stdout:
self.stdout = ''
else:
self.stdout = stdout
if reason:
self.reason = reason
else:
self.reason = '-'
message = self.MESSAGE_TMPL % {
'description': self.description,
'cmd': self.cmd,
'exit_code': self.exit_code,
'stdout': self.stdout,
'stderr': self.stderr,
'reason': self.reason,
}
IOError.__init__(self, message)
# For backward compatibility with Python 2.
if not hasattr(self, 'message'):
self.message = message
class SeLinuxGuard(object):
def __init__(self, path, recursive=False):
# Late import since it might not always
# be possible to use this
try:
self.selinux = importer.import_module('selinux')
except ImportError:
self.selinux = None
self.path = path
self.recursive = recursive
def __enter__(self):
if self.selinux and self.selinux.is_selinux_enabled():
return True
else:
return False
def __exit__(self, excp_type, excp_value, excp_traceback):
if not self.selinux or not self.selinux.is_selinux_enabled():
return
if not os.path.lexists(self.path):
return
path = os.path.realpath(self.path)
# path should be a string, not unicode
if six.PY2:
path = str(path)
try:
stats = os.lstat(path)
self.selinux.matchpathcon(path, stats[stat.ST_MODE])
except OSError:
return
LOG.debug("Restoring selinux mode for %s (recursive=%s)",
path, self.recursive)
self.selinux.restorecon(path, recursive=self.recursive)
class MountFailedError(Exception):
pass
class DecompressionError(Exception):
pass
def ExtendedTemporaryFile(**kwargs):
fh = tempfile.NamedTemporaryFile(**kwargs)
# Replace its unlink with a quiet version
# that does not raise errors when the
# file to unlink has been unlinked elsewhere..
LOG.debug("Created temporary file %s", fh.name)
fh.unlink = del_file
# Add a new method that will unlink
# right 'now' but still lets the exit
# method attempt to remove it (which will
# not throw due to our del file being quiet
# about files that are not there)
def unlink_now():
fh.unlink(fh.name)
setattr(fh, 'unlink_now', unlink_now)
return fh
def fork_cb(child_cb, *args, **kwargs):
fid = os.fork()
if fid == 0:
try:
child_cb(*args, **kwargs)
os._exit(0)
except:
logexc(LOG, "Failed forking and calling callback %s",
type_utils.obj_name(child_cb))
os._exit(1)
else:
LOG.debug("Forked child %s who will run callback %s",
fid, type_utils.obj_name(child_cb))
def is_true(val, addons=None):
if isinstance(val, (bool)):
return val is True
check_set = TRUE_STRINGS
if addons:
check_set = list(check_set) + addons
if six.text_type(val).lower().strip() in check_set:
return True
return False
def is_false(val, addons=None):
if isinstance(val, (bool)):
return val is False
check_set = FALSE_STRINGS
if addons:
check_set = list(check_set) + addons
if six.text_type(val).lower().strip() in check_set:
return True
return False
def translate_bool(val, addons=None):
if not val:
# This handles empty lists and false and
# other things that python believes are false
return False
# If its already a boolean skip
if isinstance(val, (bool)):
return val
return is_true(val, addons)
def rand_str(strlen=32, select_from=None):
if not select_from:
select_from = string.ascii_letters + string.digits
return "".join([random.choice(select_from) for _x in range(0, strlen)])
def read_conf(fname):
try:
return load_yaml(load_file(fname), default={})
except IOError as e:
if e.errno == errno.ENOENT:
return {}
else:
raise
# Merges X lists, and then keeps the
# unique ones, but orders by sort order
# instead of by the original order
def uniq_merge_sorted(*lists):
return sorted(uniq_merge(*lists))
# Merges X lists and then iterates over those
# and only keeps the unique items (order preserving)
# and returns that merged and uniqued list as the
# final result.
#
# Note: if any entry is a string it will be
# split on commas and empty entries will be
# evicted and merged in accordingly.
def uniq_merge(*lists):
combined_list = []
for a_list in lists:
if isinstance(a_list, six.string_types):
a_list = a_list.strip().split(",")
# Kickout the empty ones
a_list = [a for a in a_list if len(a)]
combined_list.extend(a_list)
return uniq_list(combined_list)
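
def _uniq_merge_demo():
    # Illustrative sketch (not part of the module): strings are split on
    # commas, empty entries evicted, and order preserved while de-duping.
    return uniq_merge(['a', 'b'], 'b,,c')   # -> ['a', 'b', 'c']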
def clean_filename(fn):
for (k, v) in FN_REPLACEMENTS.items():
fn = fn.replace(k, v)
removals = []
for k in fn:
if k not in FN_ALLOWED:
removals.append(k)
for k in removals:
fn = fn.replace(k, '')
fn = fn.strip()
return fn
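
def _clean_filename_demo():
    # Illustrative sketch (not part of the module, assuming a posix os.sep):
    # separators become '_' and characters outside FN_ALLOWED (here the
    # space and '?') are dropped.
    return clean_filename('/tmp/my file?.txt')   # -> '_tmp_myfile.txt'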
def decomp_gzip(data, quiet=True, decode=True):
try:
buf = six.BytesIO(encode_text(data))
with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
if decode:
return decode_binary(gh.read())
else:
return gh.read()
except Exception as e:
if quiet:
return data
else:
raise DecompressionError(six.text_type(e))
def extract_usergroup(ug_pair):
if not ug_pair:
return (None, None)
ug_parted = ug_pair.split(':', 1)
u = ug_parted[0].strip()
if len(ug_parted) == 2:
g = ug_parted[1].strip()
else:
g = None
if not u or u == "-1" or u.lower() == "none":
u = None
if not g or g == "-1" or g.lower() == "none":
g = None
return (u, g)
def find_modules(root_dir):
entries = dict()
for fname in glob.glob(os.path.join(root_dir, "*.py")):
if not os.path.isfile(fname):
continue
modname = os.path.basename(fname)[0:-3]
modname = modname.strip()
if modname and modname.find(".") == -1:
entries[fname] = modname
return entries
def multi_log(text, console=True, stderr=True,
log=None, log_level=logging.DEBUG):
if stderr:
sys.stderr.write(text)
if console:
conpath = "/dev/console"
if os.path.exists(conpath):
with open(conpath, 'w') as wfh:
wfh.write(text)
wfh.flush()
else:
# A container may lack /dev/console (arguably a container bug). If
# it does not exist, then write output to stdout. this will result
# in duplicate stderr and stdout messages if stderr was True.
#
# even though upstart or systemd might have set up output to go to
# /dev/console, the user may have configured elsewhere via
# cloud-config 'output'. If there is /dev/console, messages will
# still get there.
sys.stdout.write(text)
if log:
if text[-1] == "\n":
log.log(log_level, text[:-1])
else:
log.log(log_level, text)
def load_json(text, root_types=(dict,)):
decoded = json.loads(decode_binary(text))
if not isinstance(decoded, tuple(root_types)):
expected_types = ", ".join([str(t) for t in root_types])
raise TypeError("(%s) root types expected, got %s instead"
% (expected_types, type(decoded)))
return decoded
def is_ipv4(instr):
"""determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.')
if len(toks) != 4:
return False
try:
toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
except:
return False
return len(toks) == 4
def get_cfg_option_bool(yobj, key, default=False):
if key not in yobj:
return default
return translate_bool(yobj[key])
def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
val = yobj[key]
if not isinstance(val, six.string_types):
val = str(val)
return val
def get_cfg_option_int(yobj, key, default=0):
return int(get_cfg_option_str(yobj, key, default=default))
def system_info():
return {
'platform': platform.platform(),
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
'dist': platform.linux_distribution(),
}
def get_cfg_option_list(yobj, key, default=None):
"""
Gets the C{key} config option from C{yobj} as a list of strings. If the
key is present as a single string it will be returned as a list with one
string arg.
@param yobj: The configuration object.
@param key: The configuration key to get.
@param default: The default to return if key is not found.
@return: The configuration option as a list of strings or default if key
is not found.
"""
if key not in yobj:
return default
if yobj[key] is None:
return []
val = yobj[key]
if isinstance(val, (list)):
cval = [v for v in val]
return cval
if not isinstance(val, six.string_types):
val = str(val)
return [val]
# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
def get_cfg_by_path(yobj, keyp, default=None):
cur = yobj
for tok in keyp:
if tok not in cur:
return default
cur = cur[tok]
return cur
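
def _get_cfg_by_path_demo():
    # Illustrative sketch (not part of the module): nested lookup by key
    # path, with a default for missing keys.
    cfg = {'a': {'b': 1}}
    return (get_cfg_by_path(cfg, ('a', 'b')),       # -> 1
            get_cfg_by_path(cfg, ('a', 'x'), -1))   # -> -1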
def fixup_output(cfg, mode):
(outfmt, errfmt) = get_output_cfg(cfg, mode)
redirect_output(outfmt, errfmt)
return (outfmt, errfmt)
# redirect_output(outfmt, errfmt, orig_out, orig_err)
# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
# fmt can be:
# > FILEPATH
# >> FILEPATH
# | program [ arg1 [ arg2 [ ... ] ] ]
#
# with a '|', arguments are passed to shell, so one level of
# shell escape is required.
#
# if _CLOUD_INIT_SAVE_STDOUT is set in environment to a non empty and true
# value then output will not be redirected (useful for debugging).
#
def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
return
if not o_out:
o_out = sys.stdout
if not o_err:
o_err = sys.stderr
if outfmt:
LOG.debug("Redirecting %s to %s", o_out, outfmt)
(mode, arg) = outfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
if mode == ">":
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
new_fp = proc.stdin
else:
raise TypeError("Invalid type for output format: %s" % outfmt)
if o_out:
os.dup2(new_fp.fileno(), o_out.fileno())
if errfmt == outfmt:
LOG.debug("Redirecting %s to %s", o_err, outfmt)
os.dup2(new_fp.fileno(), o_err.fileno())
return
if errfmt:
LOG.debug("Redirecting %s to %s", o_err, errfmt)
(mode, arg) = errfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
if mode == ">":
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
new_fp = proc.stdin
else:
raise TypeError("Invalid type for error format: %s" % errfmt)
if o_err:
os.dup2(new_fp.fileno(), o_err.fileno())
def make_url(scheme, host, port=None,
path='', params='', query='', fragment=''):
pieces = []
pieces.append(scheme or '')
netloc = ''
if host:
netloc = str(host)
if port is not None:
netloc += ":" + "%s" % (port)
pieces.append(netloc or '')
pieces.append(path or '')
pieces.append(params or '')
pieces.append(query or '')
pieces.append(fragment or '')
return urlparse.urlunparse(pieces)
def mergemanydict(srcs, reverse=False):
if reverse:
srcs = reversed(srcs)
merged_cfg = {}
for cfg in srcs:
if cfg:
# Figure out which mergers to apply...
mergers_to_apply = mergers.dict_extract_mergers(cfg)
if not mergers_to_apply:
mergers_to_apply = mergers.default_mergers()
merger = mergers.construct(mergers_to_apply)
merged_cfg = merger.merge(merged_cfg, cfg)
return merged_cfg
@contextlib.contextmanager
def chdir(ndir):
curr = os.getcwd()
try:
os.chdir(ndir)
yield ndir
finally:
os.chdir(curr)
@contextlib.contextmanager
def umask(n_msk):
old = os.umask(n_msk)
try:
yield old
finally:
os.umask(old)
@contextlib.contextmanager
def tempdir(**kwargs):
# This seems like it was only added in python 3.2
    # Make it available here since it's useful...
# See: http://bugs.python.org/file12970/tempdir.patch
tdir = tempfile.mkdtemp(**kwargs)
try:
yield tdir
finally:
del_dir(tdir)
def center(text, fill, max_len):
return '{0:{fill}{align}{size}}'.format(text, fill=fill,
align="^", size=max_len)
def del_dir(path):
LOG.debug("Recursively deleting %s", path)
shutil.rmtree(path)
def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
if exe_prefix is None:
prefix = []
elif isinstance(exe_prefix, str):
prefix = [str(exe_prefix)]
elif isinstance(exe_prefix, list):
prefix = exe_prefix
else:
raise TypeError("exe_prefix must be None, str, or list")
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
subp(prefix + [exe_path], capture=False)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
if failed and attempted:
raise RuntimeError('Runparts: %s failures in %s attempted commands'
% (len(failed), len(attempted)))
# read_optional_seed
# returns boolean indicating success or failure (presense of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
# 'meta-data' entries
def read_optional_seed(fill, base="", ext="", timeout=5):
try:
(md, ud) = read_seeded(base, ext, timeout)
fill['user-data'] = ud
fill['meta-data'] = md
return True
except url_helper.UrlError as e:
if e.code == url_helper.NOT_FOUND:
return False
raise
def fetch_ssl_details(paths=None):
ssl_details = {}
# Lookup in these locations for ssl key/cert files
ssl_cert_paths = [
'/var/lib/cloud/data/ssl',
'/var/lib/cloud/instance/data/ssl',
]
if paths:
ssl_cert_paths.extend([
os.path.join(paths.get_ipath_cur('data'), 'ssl'),
os.path.join(paths.get_cpath('data'), 'ssl'),
])
ssl_cert_paths = uniq_merge(ssl_cert_paths)
ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
cert_file = None
for d in ssl_cert_paths:
if os.path.isfile(os.path.join(d, 'cert.pem')):
cert_file = os.path.join(d, 'cert.pem')
break
key_file = None
for d in ssl_cert_paths:
if os.path.isfile(os.path.join(d, 'key.pem')):
key_file = os.path.join(d, 'key.pem')
break
if cert_file and key_file:
ssl_details['cert_file'] = cert_file
ssl_details['key_file'] = key_file
elif cert_file:
ssl_details['cert_file'] = cert_file
return ssl_details
def read_file_or_url(url, timeout=5, retries=10,
headers=None, data=None, sec_between=1, ssl_details=None,
headers_cb=None, exception_cb=None):
url = url.lstrip()
if url.startswith("/"):
url = "file://%s" % url
if url.lower().startswith("file://"):
if data:
LOG.warn("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
try:
contents = load_file(file_path, decode=False)
except IOError as e:
code = e.errno
if e.errno == errno.ENOENT:
code = url_helper.NOT_FOUND
raise url_helper.UrlError(cause=e, code=code, headers=None,
url=url)
return url_helper.FileResponse(file_path, contents=contents)
else:
return url_helper.readurl(url,
timeout=timeout,
retries=retries,
headers=headers,
headers_cb=headers_cb,
data=data,
sec_between=sec_between,
ssl_details=ssl_details,
exception_cb=exception_cb)
def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
blob = decode_binary(blob)
try:
LOG.debug("Attempting to load yaml from string "
"of length %s with allowed root types %s",
len(blob), allowed)
converted = safeyaml.load(blob)
if not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
raise TypeError(("Yaml load allows %s root types,"
" but got %s instead") %
(allowed, type_utils.obj_name(converted)))
loaded = converted
except (yaml.YAMLError, TypeError, ValueError):
if len(blob) == 0:
LOG.debug("load_yaml given empty string, returning default")
else:
logexc(LOG, "Failed loading yaml blob")
return loaded
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
if base.startswith("/"):
base = "file://%s" % base
# default retries for file is 0. for network is 10
if base.startswith("file://"):
retries = file_retries
if base.find("%s") >= 0:
ud_url = base % ("user-data" + ext)
md_url = base % ("meta-data" + ext)
else:
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
md = None
if md_resp.ok():
md = load_yaml(decode_binary(md_resp.contents), default={})
ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
ud = None
if ud_resp.ok():
ud = ud_resp.contents
return (md, ud)
def read_conf_d(confd):
    # Get reverse sorted list (later trumps earlier)
confs = sorted(os.listdir(confd), reverse=True)
# Remove anything not ending in '.cfg'
confs = [f for f in confs if f.endswith(".cfg")]
# Remove anything not a file
confs = [f for f in confs
if os.path.isfile(os.path.join(confd, f))]
# Load them all so that they can be merged
cfgs = []
for fn in confs:
cfgs.append(read_conf(os.path.join(confd, fn)))
return mergemanydict(cfgs)
def read_conf_with_confd(cfgfile):
cfg = read_conf(cfgfile)
confd = False
if "conf_d" in cfg:
confd = cfg['conf_d']
if confd:
if not isinstance(confd, six.string_types):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
(cfgfile, type_utils.obj_name(confd)))
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
confd = "%s.d" % cfgfile
if not confd or not os.path.isdir(confd):
return cfg
# Conf.d settings override input configuration
confd_cfg = read_conf_d(confd)
return mergemanydict([confd_cfg, cfg])
def read_cc_from_cmdline(cmdline=None):
# this should support reading cloud-config information from
# the kernel command line. It is intended to support content of the
# format:
# cc: <yaml content here> [end_cc]
# this would include:
# cc: ssh_import_id: [smoser, kirkland]\\n
# cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
# cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
if cmdline is None:
cmdline = get_cmdline()
tag_begin = "cc:"
tag_end = "end_cc"
begin_l = len(tag_begin)
end_l = len(tag_end)
clen = len(cmdline)
tokens = []
begin = cmdline.find(tag_begin)
while begin >= 0:
end = cmdline.find(tag_end, begin + begin_l)
if end < 0:
end = clen
tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
"\n"))
begin = cmdline.find(tag_begin, end + end_l)
return '\n'.join(tokens)
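
def _read_cc_from_cmdline_demo():
    # Illustrative sketch (not part of the module): two embedded fragments
    # on a kernel command line are extracted and joined with newlines.
    cmdline = 'root=/dev/sda cc: hostname: h end_cc cc: runcmd: [ pwd ] end_cc'
    return read_cc_from_cmdline(cmdline)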
def dos2unix(contents):
# find first end of line
pos = contents.find('\n')
if pos <= 0 or contents[pos - 1] != '\r':
return contents
return contents.replace('\r\n', '\n')
def get_hostname_fqdn(cfg, cloud):
# return the hostname and fqdn from 'cfg'. If not found in cfg,
# then fall back to data from cloud
if "fqdn" in cfg:
# user specified a fqdn. Default hostname then is based off that
fqdn = cfg['fqdn']
hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
else:
if "hostname" in cfg and cfg['hostname'].find('.') > 0:
# user specified hostname, and it had '.' in it
# be nice to them. set fqdn and hostname from that
fqdn = cfg['hostname']
hostname = cfg['hostname'][:fqdn.find('.')]
else:
# no fqdn set, get fqdn from cloud.
# get hostname from cfg if available otherwise cloud
fqdn = cloud.get_hostname(fqdn=True)
if "hostname" in cfg:
hostname = cfg['hostname']
else:
hostname = cloud.get_hostname()
return (hostname, fqdn)
def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
"""
For each host a single line should be present with
the following information:
IP_address canonical_hostname [aliases...]
Fields of the entry are separated by any number of blanks and/or tab
characters. Text from a "#" character until the end of the line is a
comment, and is ignored. Host names may contain only alphanumeric
characters, minus signs ("-"), and periods ("."). They must begin with
an alphabetic character and end with an alphanumeric character.
Optional aliases provide for name changes, alternate spellings, shorter
hostnames, or generic hostnames (for example, localhost).
"""
fqdn = None
try:
for line in load_file(filename).splitlines():
hashpos = line.find("#")
if hashpos >= 0:
line = line[0:hashpos]
line = line.strip()
if not line:
continue
            # If there are fewer than 3 entries
# (IP_address, canonical_hostname, alias)
# then ignore this line
toks = line.split()
if len(toks) < 3:
continue
if hostname in toks[2:]:
fqdn = toks[1]
break
except IOError:
pass
return fqdn
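
def _get_fqdn_from_hosts_demo():
    # Illustrative sketch (not part of the module): resolve the fqdn from a
    # minimal, temporary hosts file.
    (fd, name) = tempfile.mkstemp()
    os.write(fd, b"127.0.1.1 myhost.example.com myhost\n")
    os.close(fd)
    try:
        return get_fqdn_from_hosts('myhost', name)   # -> 'myhost.example.com'
    finally:
        os.unlink(name)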
def get_cmdline_url(names=('cloud-config-url', 'url'),
starts=b"#cloud-config", cmdline=None):
if cmdline is None:
cmdline = get_cmdline()
data = keyval_str_to_dict(cmdline)
url = None
key = None
for key in names:
if key in data:
url = data[key]
break
if not url:
return (None, None, None)
resp = read_file_or_url(url)
# allow callers to pass starts as text when comparing to bytes contents
starts = encode_text(starts)
if resp.ok() and resp.contents.startswith(starts):
return (key, url, resp.contents)
return (key, url, None)
def is_resolvable(name):
"""determine if a url is resolvable, return a boolean
This also attempts to be resilent against dns redirection.
Note, that normal nsswitch resolution is used here. So in order
to avoid any utilization of 'search' entries in /etc/resolv.conf
we have to append '.'.
The top level 'invalid' domain is invalid per RFC. And example.com
should also not exist. The random entry will be resolved inside
the search list.
"""
global _DNS_REDIRECT_IP
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = ("does-not-exist.example.com.", "example.invalid.",
rand_str())
badresults = {}
for iname in badnames:
try:
result = socket.getaddrinfo(iname, None, 0, 0,
socket.SOCK_STREAM, socket.AI_CANONNAME)
badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
badips.add(sockaddr[0])
except (socket.gaierror, socket.error):
pass
_DNS_REDIRECT_IP = badips
if badresults:
LOG.debug("detected dns redirection: %s", badresults)
try:
result = socket.getaddrinfo(name, None)
# check first result's sockaddr field
addr = result[0][4][0]
if addr in _DNS_REDIRECT_IP:
return False
return True
except (socket.gaierror, socket.error):
return False
def get_hostname():
hostname = socket.gethostname()
return hostname
def gethostbyaddr(ip):
try:
return socket.gethostbyaddr(ip)[0]
except socket.herror:
return None
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return is_resolvable(urlparse.urlparse(url).hostname)
def search_for_mirror(candidates):
"""
Search through a list of mirror urls for one that works
This needs to return quickly.
"""
for cand in candidates:
try:
if is_resolvable_url(cand):
return cand
except Exception:
pass
return None
def close_stdin():
"""
reopen stdin as /dev/null so even subprocesses or other os level things get
/dev/null as input.
if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
value then input will not be closed (useful for debugging).
"""
if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
return
with open(os.devnull) as fp:
os.dup2(fp.fileno(), sys.stdin.fileno())
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
TYPE=<filesystem>
LABEL=<label>
UUID=<uuid>
"""
blk_id_cmd = ['blkid']
options = []
if criteria:
# Search for block devices with tokens named NAME that
# have the value 'value' and display any devices which are found.
# Common values for NAME include TYPE, LABEL, and UUID.
# If there are no devices specified on the command line,
# all block devices will be searched; otherwise,
# only search the devices specified by the user.
options.append("-t%s" % (criteria))
if tag:
# For each (specified) device, show only the tags that match tag.
options.append("-s%s" % (tag))
if no_cache:
# If you want to start with a clean cache
# (i.e. don't report devices previously scanned
# but not necessarily available at this time), specify /dev/null.
options.extend(["-c", "/dev/null"])
if oformat:
# Display blkid's output using the specified format.
# The format parameter may be:
# full, value, list, device, udev, export
options.append('-o%s' % (oformat))
if path:
options.append(path)
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
(out, _err) = subp(cmd, rcs=[0, 2])
entries = []
for line in out.splitlines():
line = line.strip()
if line:
entries.append(line)
return entries
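# Illustrative usage (device name is hypothetical):
#   find_devs_with("LABEL=cloudimg-rootfs")   # e.g. ['/dev/vda1']
# runs roughly: blkid -tLABEL=cloudimg-rootfs -odevice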
def peek_file(fname, max_bytes):
LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
with open(fname, 'rb') as ifh:
return ifh.read(max_bytes)
def uniq_list(in_list):
out_list = []
for i in in_list:
if i in out_list:
continue
else:
out_list.append(i)
return out_list
def load_file(fname, read_cb=None, quiet=False, decode=True):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
ofh = six.BytesIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
except IOError as e:
if not quiet:
raise
if e.errno != errno.ENOENT:
raise
contents = ofh.getvalue()
LOG.debug("Read %s bytes from %s", len(contents), fname)
if decode:
return decode_binary(contents)
else:
return contents
def get_cmdline():
if 'DEBUG_PROC_CMDLINE' in os.environ:
cmdline = os.environ["DEBUG_PROC_CMDLINE"]
else:
try:
cmdline = load_file("/proc/cmdline").strip()
except:
cmdline = ""
return cmdline
def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
bytes_piped = 0
while True:
data = in_fh.read(chunk_size)
if len(data) == 0:
break
else:
out_fh.write(data)
bytes_piped += len(data)
if chunk_cb:
chunk_cb(bytes_piped)
out_fh.flush()
return bytes_piped
def chownbyid(fname, uid=None, gid=None):
if uid in [None, -1] and gid in [None, -1]:
# Nothing to do
return
LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
os.chown(fname, uid, gid)
def chownbyname(fname, user=None, group=None):
uid = -1
gid = -1
try:
if user:
uid = pwd.getpwnam(user).pw_uid
if group:
gid = grp.getgrnam(group).gr_gid
except KeyError as e:
raise OSError("Unknown user or group: %s" % (e))
chownbyid(fname, uid, gid)
# Always returns well formatted values
# cfg is expected to have an entry 'output' in it, which is a dictionary
# that includes entries for 'init', 'config', 'final' or 'all'
# init: /var/log/cloud.out
# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
# final:
# output: "| logger -p"
# error: "> /dev/null"
# this returns the specific 'mode' entry, cleanly formatted, as a [stdout, stderr] pair
def get_output_cfg(cfg, mode):
ret = [None, None]
if not cfg or 'output' not in cfg:
return ret
outcfg = cfg['output']
if mode in outcfg:
modecfg = outcfg[mode]
else:
if 'all' not in outcfg:
return ret
# if there is an 'all' item in the output list
# then it applies to all users of this (init, config, final)
modecfg = outcfg['all']
# if value is a string, it specifies stdout and stderr
if isinstance(modecfg, str):
ret = [modecfg, modecfg]
# if it's a list, then we expect (stdout, stderr)
if isinstance(modecfg, list):
if len(modecfg) > 0:
ret[0] = modecfg[0]
if len(modecfg) > 1:
ret[1] = modecfg[1]
# if it is a dictionary, expect 'out' and 'error'
# items, which indicate out and error
if isinstance(modecfg, dict):
if 'output' in modecfg:
ret[0] = modecfg['output']
if 'error' in modecfg:
ret[1] = modecfg['error']
# if err's entry == "&1", then make it same as stdout
# as in shell syntax of "echo foo >/dev/null 2>&1"
if ret[1] == "&1":
ret[1] = ret[0]
swlist = [">>", ">", "|"]
for i in range(len(ret)):
if not ret[i]:
continue
val = ret[i].lstrip()
found = False
for s in swlist:
if val.startswith(s):
val = "%s %s" % (s, val[len(s):].strip())
found = True
break
if not found:
# default behavior is append
val = "%s %s" % (">>", val.strip())
ret[i] = val
return ret
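# Illustrative example (log path is hypothetical):
#   get_output_cfg({'output': {'all': '| tee -a /var/log/ci.log'}}, 'init')
#   -> ['| tee -a /var/log/ci.log', '| tee -a /var/log/ci.log']
# A bare path like '/var/log/ci.log' is normalized to '>> /var/log/ci.log'
# since append is the default behavior.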
def logexc(log, msg, *args):
# Setting this here allows this to change
# levels easily (not always error level)
# or even desirable to have that much junk
# coming out to a non-debug stream
if msg:
log.warn(msg, *args)
# Debug gets the full trace. However, nose has a bug whereby its
# logcapture plugin doesn't properly handle the case where there is no
# actual exception. To avoid tracebacks during the test suite then, we'll
# do the actual exc_info extraction here, and if there is no exception in
# flight, we'll just pass in None.
exc_info = sys.exc_info()
if exc_info == (None, None, None):
exc_info = None
log.debug(msg, exc_info=exc_info, *args)
def hash_blob(blob, routine, mlen=None):
hasher = hashlib.new(routine)
hasher.update(encode_text(blob))
digest = hasher.hexdigest()
# Don't get too long now
if mlen is not None:
return digest[0:mlen]
else:
return digest
def is_user(name):
try:
if pwd.getpwnam(name):
return True
except KeyError:
return False
def is_group(name):
try:
if grp.getgrnam(name):
return True
except KeyError:
return False
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
# TODO(harlowja) use a se guard here??
os.rename(src, dest)
def ensure_dirs(dirlist, mode=0o755):
for d in dirlist:
ensure_dir(d, mode)
def read_write_cmdline_url(target_fn):
if not os.path.exists(target_fn):
try:
(key, url, content) = get_cmdline_url()
except:
logexc(LOG, "Failed fetching command line url")
return
try:
if key and content:
write_file(target_fn, content, mode=0o600)
LOG.debug(("Wrote to %s with contents of command line"
" url %s (len=%s)"), target_fn, url, len(content))
elif key and not content:
LOG.debug(("Command line key %s with url"
" %s had no contents"), key, url)
except:
logexc(LOG, "Failed writing url content to %s", target_fn)
def yaml_dumps(obj, explicit_start=True, explicit_end=True):
return yaml.safe_dump(obj,
line_break="\n",
indent=4,
explicit_start=explicit_start,
explicit_end=explicit_end,
default_flow_style=False)
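# Illustrative: yaml_dumps({'a': 1}) -> '---\na: 1\n...\n'
# (explicit_start/explicit_end add the '---' and '...' document markers)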
def ensure_dir(path, mode=None):
if not os.path.isdir(path):
# Make the dir and adjust the mode
with SeLinuxGuard(os.path.dirname(path), recursive=True):
os.makedirs(path)
chmod(path, mode)
else:
# Just adjust the mode
chmod(path, mode)
@contextlib.contextmanager
def unmounter(umount):
try:
yield umount
finally:
if umount:
umount_cmd = ["umount", umount]
subp(umount_cmd)
def mounts():
mounted = {}
try:
# Go through mounts to see what is already mounted
if os.path.exists("/proc/mounts"):
mount_locs = load_file("/proc/mounts").splitlines()
method = 'proc'
else:
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
method = 'mount'
mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
for mpline in mount_locs:
# Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
# FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
if method == 'proc':
(dev, mp, fstype, opts, _freq, _passno) = mpline.split()
else:
m = re.search(mountre, mpline)
dev = m.group(1)
mp = m.group(2)
fstype = m.group(3)
opts = m.group(4)
except:
continue
# If the name of the mount point contains spaces these
# can be escaped as '\040', so undo that..
mp = mp.replace("\\040", " ")
mounted[dev] = {
'fstype': fstype,
'mountpoint': mp,
'opts': opts,
}
LOG.debug("Fetched %s mounts from %s", mounted, method)
except (IOError, OSError):
logexc(LOG, "Failed fetching mount points")
return mounted
def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
"""
Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
returned. If data != None, also pass data to callback.
mtype is a filesystem type. it may be a list, string (a single fsname)
or a list of fsnames.
"""
if isinstance(mtype, str):
mtypes = [mtype]
elif isinstance(mtype, (list, tuple)):
mtypes = list(mtype)
elif mtype is None:
mtypes = None
else:
# anything else would leave 'mtypes' unbound below, so fail early
raise TypeError("Unsupported type provided for mtype parameter: %s" % (type(mtype)))
# clean up 'mtype' input a bit based on platform.
platsys = platform.system().lower()
if platsys == "linux":
if mtypes is None:
mtypes = ["auto"]
elif platsys.endswith("bsd"):
if mtypes is None:
mtypes = ['ufs', 'cd9660', 'vfat']
for index, mtype in enumerate(mtypes):
if mtype == "iso9660":
mtypes[index] = "cd9660"
else:
# we cannot do a smart "auto", so just call 'mount' once with no -t
mtypes = ['']
mounted = mounts()
with tempdir() as tmpd:
umount = False
if os.path.realpath(device) in mounted:
mountpoint = mounted[os.path.realpath(device)]['mountpoint']
else:
failure_reason = None
for mtype in mtypes:
mountpoint = None
try:
mountcmd = ['mount']
mountopts = []
if rw:
mountopts.append('rw')
else:
mountopts.append('ro')
if sync:
# This seems like the safe approach to take
# (ie where this is on by default)
mountopts.append("sync")
if mountopts:
mountcmd.extend(["-o", ",".join(mountopts)])
if mtype:
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
subp(mountcmd)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
break
except (IOError, OSError) as exc:
LOG.debug("Failed mount of '%s' as '%s': %s",
device, mtype, exc)
failure_reason = exc
if not mountpoint:
raise MountFailedError("Failed mounting %s to %s due to: %s" %
(device, tmpd, failure_reason))
# Be nice and ensure it ends with a slash
if not mountpoint.endswith("/"):
mountpoint += "/"
with unmounter(umount):
if data is None:
ret = callback(mountpoint)
else:
ret = callback(mountpoint, data)
return ret
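# Illustrative usage (device path is hypothetical):
#   files = mount_cb('/dev/sr0', lambda mp: os.listdir(mp))
# mounts the device read-only (the default), invokes the callback with the
# mountpoint (trailing slash included), then unmounts it.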
def get_builtin_cfg():
# Deep copy so that others can't modify
return obj_copy.deepcopy(CFG_BUILTIN)
def sym_link(source, link, force=False):
LOG.debug("Creating symbolic link from %r => %r", link, source)
if force and os.path.exists(link):
del_file(link)
os.symlink(source, link)
def del_file(path):
LOG.debug("Attempting to remove %s", path)
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
def copy(src, dest):
LOG.debug("Copying %s to %s", src, dest)
shutil.copy(src, dest)
def time_rfc2822():
try:
ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
except:
ts = "??"
return ts
def uptime():
uptime_str = '??'
method = 'unknown'
try:
if os.path.exists("/proc/uptime"):
method = '/proc/uptime'
contents = load_file("/proc/uptime").strip()
if contents:
uptime_str = contents.split()[0]
else:
method = 'ctypes'
libc = ctypes.CDLL('/lib/libc.so.7')
size = ctypes.c_size_t()
buf = ctypes.c_int()
size.value = ctypes.sizeof(buf)
libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
ctypes.byref(size), None, 0)
now = time.time()
bootup = buf.value
uptime_str = now - bootup
except:
logexc(LOG, "Unable to read uptime using method: %s" % method)
return uptime_str
def append_file(path, content):
write_file(path, content, omode="ab", mode=None)
def ensure_file(path, mode=0o644):
write_file(path, content='', omode="ab", mode=mode)
def safe_int(possible_int):
try:
return int(possible_int)
except (ValueError, TypeError):
return None
def chmod(path, mode):
real_mode = safe_int(mode)
if path and real_mode:
with SeLinuxGuard(path):
os.chmod(path, real_mode)
def write_file(filename, content, mode=0o644, omode="wb"):
"""
Writes a file with the given content and sets the file mode as specified.
Restores the SELinux context if possible.
@param filename: The full path of the file to write.
@param content: The content to write to the file.
@param mode: The filesystem mode to set on the file.
@param omode: The open mode used when opening the file (w, wb, a, etc.)
"""
ensure_dir(os.path.dirname(filename))
if 'b' in omode.lower():
content = encode_text(content)
write_type = 'bytes'
else:
content = decode_binary(content)
write_type = 'characters'
LOG.debug("Writing to %s - %s: [%s] %s %s",
filename, omode, mode, len(content), write_type)
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
fh.flush()
chmod(filename, mode)
def delete_dir_contents(dirname):
"""
Deletes all contents of a directory without deleting the directory itself.
@param dirname: The directory whose contents should be deleted.
"""
for node in os.listdir(dirname):
node_fullpath = os.path.join(dirname, node)
if os.path.isdir(node_fullpath):
del_dir(node_fullpath)
else:
del_file(node_fullpath)
def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
logstring=False):
if rcs is None:
rcs = [0]
try:
if not logstring:
LOG.debug(("Running command %s with allowed return codes %s"
" (shell=%s, capture=%s)"), args, rcs, shell, capture)
else:
LOG.debug(("Running hidden command to protect sensitive "
"input/output logstring: %s"), logstring)
if not capture:
stdout = None
stderr = None
else:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
stdin = subprocess.PIPE
kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
env=env, shell=shell)
if six.PY3:
# Use this so subprocess output will be (Python 3) str, not bytes.
kws['universal_newlines'] = True
sp = subprocess.Popen(args, **kws)
(out, err) = sp.communicate(data)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e)
rc = sp.returncode
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
exit_code=rc,
cmd=args)
# Just ensure blank instead of none?? (iff capturing)
if not out and capture:
out = ''
if not err and capture:
err = ''
return (out, err)
def make_header(comment_char="#", base='created'):
ci_ver = version.version_string()
header = str(comment_char)
header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
header += " on %s" % time_rfc2822()
return header
def abs_join(*paths):
return os.path.abspath(os.path.join(*paths))
# shellify takes a list of commands
# for each entry in the list
# if it is an array, shell protect it (with single ticks)
# if it is a string, do nothing
def shellify(cmdlist, add_header=True):
content = ''
if add_header:
content += "#!/bin/sh\n"
escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
cmds_made = 0
for args in cmdlist:
# If the item is a list, wrap all items in single ticks.
# If it's not, then just write it directly.
if isinstance(args, list):
fixed = []
for f in args:
fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
content = "%s%s\n" % (content, ' '.join(fixed))
cmds_made += 1
elif isinstance(args, six.string_types):
content = "%s%s\n" % (content, args)
cmds_made += 1
else:
raise RuntimeError(("Unable to shellify type %s"
" which is not a list or string")
% (type_utils.obj_name(args)))
LOG.debug("Shellified %s commands.", cmds_made)
return content
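# Illustrative example:
#   shellify([['echo', 'hi mom'], "echo 'plain string'"])
# produces:
#   #!/bin/sh
#   'echo' 'hi mom'
#   echo 'plain string'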
def strip_prefix_suffix(line, prefix=None, suffix=None):
if prefix and line.startswith(prefix):
line = line[len(prefix):]
if suffix and line.endswith(suffix):
line = line[:-len(suffix)]
return line
def is_container():
"""
Checks to see if this code is running in a container of some sort
"""
for helper in CONTAINER_TESTS:
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
subp([helper])
return True
except (IOError, OSError):
pass
# this code is largely from the logic in
# ubuntu's /etc/init/container-detect.conf
try:
# Detect generic 'container' markers and old-style
# libvirt (LIBVIRT_LXC_UUID) via pid 1's environment
pid1env = get_proc_env(1)
if "container" in pid1env:
return True
if "LIBVIRT_LXC_UUID" in pid1env:
return True
except (IOError, OSError):
pass
# Detect OpenVZ containers
if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
return True
try:
# Detect Vserver containers
lines = load_file("/proc/self/status").splitlines()
for line in lines:
if line.startswith("VxID:"):
(_key, val) = line.strip().split(":", 1)
if val != "0":
return True
except (IOError, OSError):
pass
return False
def get_proc_env(pid):
"""
Return, as a dict, the environment that a given process id was started with.
"""
env = {}
fn = os.path.join("/proc/", str(pid), "environ")
try:
contents = load_file(fn)
toks = contents.split("\x00")
for tok in toks:
if tok == "":
continue
(name, val) = tok.split("=", 1)
if name:
env[name] = val
except (IOError, OSError):
pass
return env
def keyval_str_to_dict(kvstring):
ret = {}
for tok in kvstring.split():
try:
(key, val) = tok.split("=", 1)
except ValueError:
key = tok
val = True
ret[key] = val
return ret
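# Illustrative: keyval_str_to_dict('root=/dev/sda1 quiet')
#   -> {'root': '/dev/sda1', 'quiet': True}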
def is_partition(device):
if device.startswith("/dev/"):
device = device[5:]
return os.path.isfile("/sys/class/block/%s/partition" % device)
def expand_package_list(version_fmt, pkgs):
# we will accept tuples, lists of tuples, or just plain lists
if not isinstance(pkgs, list):
pkgs = [pkgs]
pkglist = []
for pkg in pkgs:
if isinstance(pkg, six.string_types):
pkglist.append(pkg)
continue
if isinstance(pkg, (tuple, list)):
if len(pkg) < 1 or len(pkg) > 2:
raise RuntimeError("Invalid package & version tuple.")
if len(pkg) == 2 and pkg[1]:
pkglist.append(version_fmt % tuple(pkg))
continue
pkglist.append(pkg[0])
else:
raise RuntimeError("Invalid package type.")
return pkglist
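# Illustrative example (the version format string is hypothetical):
#   expand_package_list('%s=%s', ['pkg-a', ('pkg-b', '1.0')])
#   -> ['pkg-a', 'pkg-b=1.0']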
def parse_mount_info(path, mountinfo_lines, log=LOG):
"""Return the mount information for PATH given the lines from
/proc/$$/mountinfo."""
path_elements = [e for e in path.split('/') if e]
devpth = None
fs_type = None
match_mount_point = None
match_mount_point_elements = None
for i, line in enumerate(mountinfo_lines):
parts = line.split()
# Completely fail if there is anything in any line that is
# unexpected, as continuing to parse past a bad line could
# cause an incorrect result to be returned, so it's better to
# return nothing than an incorrect result.
# The minimum number of elements in a valid line is 10.
if len(parts) < 10:
log.debug("Line %d has two few columns (%d): %s",
i + 1, len(parts), line)
return None
mount_point = parts[4]
mount_point_elements = [e for e in mount_point.split('/') if e]
# Ignore mounts deeper than the path in question.
if len(mount_point_elements) > len(path_elements):
continue
# Ignore mounts where the common path is not the same.
l = min(len(mount_point_elements), len(path_elements))
if mount_point_elements[0:l] != path_elements[0:l]:
continue
# Ignore mount points higher than an already seen mount
# point.
if (match_mount_point_elements is not None and
len(match_mount_point_elements) > len(mount_point_elements)):
continue
# Find the '-' which terminates a list of optional columns to
# find the filesystem type and the path to the device. See
# man 5 proc for the format of this file.
try:
i = parts.index('-')
except ValueError:
log.debug("Did not find column named '-' in line %d: %s",
i + 1, line)
return None
# Get the path to the device.
try:
fs_type = parts[i + 1]
devpth = parts[i + 2]
except IndexError:
log.debug("Too few columns after '-' column in line %d: %s",
i + 1, line)
return None
match_mount_point = mount_point
match_mount_point_elements = mount_point_elements
if devpth and fs_type and match_mount_point:
return (devpth, fs_type, match_mount_point)
else:
return None
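# Illustrative mountinfo line (format per the proc(5) man page):
#   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
# parse_mount_info('/mnt2', [that_line]) -> ('/dev/root', 'ext3', '/mnt2')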
def parse_mtab(path):
"""On older kernels there's no /proc/$$/mountinfo, so use mtab."""
for line in load_file("/etc/mtab").splitlines():
devpth, mount_point, fs_type = line.split()[:3]
if mount_point == path:
return devpth, fs_type, mount_point
return None
def parse_mount(path):
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
for line in mount_locs:
m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
devpth = m.group(1)
mount_point = m.group(2)
fs_type = m.group(3)
if mount_point == path:
return devpth, fs_type, mount_point
return None
def get_mount_info(path, log=LOG):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)
# does not return the ID of the device.
#
# Here, / has a device of 18 (decimal).
#
# $ stat /
# File: '/'
# Size: 234 Blocks: 0 IO Block: 4096 directory
# Device: 12h/18d Inode: 256 Links: 1
# Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
# Access: 2013-01-13 07:31:04.358011255 +0000
# Modify: 2013-01-13 18:48:25.930011255 +0000
# Change: 2013-01-13 18:48:25.930011255 +0000
# Birth: -
#
# Find where / is mounted:
#
# $ mount | grep ' / '
# /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
#
# And the device ID for /dev/vda1 is not 18:
#
# $ ls -l /dev/vda1
# brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
#
# So use /proc/$$/mountinfo to find the device underlying the
# input path.
mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log)
elif os.path.exists("/etc/mtab"):
return parse_mtab(path)
else:
return parse_mount(path)
def which(program):
# Return path of program for execution if found in path
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
_fpath, _ = os.path.split(program)
if _fpath:
if is_exe(program):
return program
else:
for path in os.environ.get("PATH", "").split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
if args is None:
args = []
if kwargs is None:
kwargs = {}
start = time.time()
ustart = None
if get_uptime:
try:
ustart = float(uptime())
except ValueError:
pass
try:
ret = func(*args, **kwargs)
finally:
delta = time.time() - start
udelta = None
if ustart is not None:
try:
udelta = float(uptime()) - ustart
except ValueError:
pass
tmsg = " took %0.3f seconds" % delta
if get_uptime:
if isinstance(udelta, (float)):
tmsg += " (%0.2f)" % udelta
else:
tmsg += " (N/A)"
try:
logfunc(msg + tmsg)
except:
pass
return ret
def expand_dotted_devname(dotted):
toks = dotted.rsplit(".", 1)
if len(toks) > 1:
return toks
else:
return (dotted, None)
def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
# return a dictionary populated with keys in 'required' and 'optional'
# by reading files at base + delim + entry
if required is None:
required = []
if optional is None:
optional = []
missing = []
ret = {}
for f in required + optional:
try:
ret[f] = load_file(base + delim + f, quiet=False, decode=False)
except IOError as e:
if e.errno != errno.ENOENT:
raise
if f in required:
missing.append(f)
if len(missing):
raise ValueError("Missing required files: %s", ','.join(missing))
return ret
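# Illustrative usage (paths are hypothetical):
#   pathprefix2dict('/run/seed', required=['meta-data'], optional=['user-data'])
# reads each file under the base directory and returns e.g.
# {'meta-data': b'...', 'user-data': b'...'}; a missing required
# file raises ValueError.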
def read_meminfo(meminfo="/proc/meminfo", raw=False):
# read a /proc/meminfo style file and return
# a dict with 'total', 'free', and 'available'
mpliers = {'kB': 2 ** 10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30}
kmap = {'MemTotal:': 'total', 'MemFree:': 'free',
'MemAvailable:': 'available'}
ret = {}
for line in load_file(meminfo).splitlines():
try:
key, value, unit = line.split()
except ValueError:
key, value = line.split()
unit = 'B'
if raw:
ret[key] = int(value) * mpliers[unit]
elif key in kmap:
ret[kmap[key]] = int(value) * mpliers[unit]
return ret
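# Illustrative example: given a meminfo line 'MemTotal:  2048 kB',
# read_meminfo() yields {'total': 2097152} (2048 * 2**10 bytes).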
def human2bytes(size):
"""Convert human string or integer to size in bytes
10M => 10485760
.5G => 536870912
"""
size_in = size
if size.endswith("B"):
size = size[:-1]
mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}
num = size
mplier = 'B'
for m in mpliers:
if size.endswith(m):
mplier = m
num = size[0:-len(m)]
try:
num = float(num)
except ValueError:
raise ValueError("'%s' is not valid input." % size_in)
if num < 0:
raise ValueError("'%s': cannot be negative" % size_in)
return int(num * mpliers[mplier])
def _read_dmi_syspath(key):
"""
Reads dmi data from /sys/class/dmi/id
"""
if key not in DMIDECODE_TO_DMI_SYS_MAPPING:
return None
mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)
LOG.debug("querying dmi data %s", dmi_key_path)
try:
if not os.path.exists(dmi_key_path):
LOG.debug("did not find %s", dmi_key_path)
return None
key_data = load_file(dmi_key_path)
if not key_data:
LOG.debug("%s did not return any data", dmi_key_path)
return None
LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
return key_data.strip()
except Exception as e:
logexc(LOG, "failed read of %s", dmi_key_path)
return None
def _call_dmidecode(key, dmidecode_path):
"""
Calls out to dmidecode to get the data out. This is mostly for supporting
OSes without /sys/class/dmi/id support.
"""
try:
cmd = [dmidecode_path, "--string", key]
(result, _err) = subp(cmd)
LOG.debug("dmidecode returned '%s' for '%s'", result, key)
return result
except (IOError, OSError) as _err:
LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err.message)
return None
def read_dmi_data(key):
"""
Wrapper for reading DMI data.
This will do the following (returning the first that produces a
result):
1) Use a mapping to translate `key` from dmidecode naming to
sysfs naming and look in /sys/class/dmi/... for a value.
2) Use `key` as a sysfs key directly and look in /sys/class/dmi/...
3) Fall-back to passing `key` to `dmidecode --string`.
If all of the above fail to find a value, None will be returned.
"""
syspath_value = _read_dmi_syspath(key)
if syspath_value is not None:
return syspath_value
dmidecode_path = which('dmidecode')
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
LOG.warn("did not find either path %s or dmidecode command",
DMI_SYS_PATH)
return None
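# Illustrative usage (the returned value depends on the platform/hypervisor):
#   read_dmi_data('system-product-name')   # e.g. 'OpenStack Nova' on a Nova guest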
def message_from_string(string):
if sys.version_info[:2] < (2, 7):
return email.message_from_file(six.StringIO(string))
return email.message_from_string(string)
|
kutenai/django | refs/heads/master | tests/migrations/migrations_test_apps/with_package_model/__init__.py | 12133432 | |
cbrewster/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/third_party/more-itertools/more_itertools/tests/__init__.py | 12133432 | |
jasonbot/django | refs/heads/master | django/conf/locale/da/__init__.py | 12133432 | |
radoraykov/rating-gov-representatives | refs/heads/master | apps/website/dashboard/__init__.py | 12133432 | |
rohitwaghchaure/erpnext-receipher | refs/heads/master | erpnext/hr/doctype/upload_attendance/__init__.py | 12133432 | |
bobobox/ansible | refs/heads/devel | lib/ansible/modules/network/panos/panos_service.py | 25 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_service
short_description: create a service object
description:
- Create a service object. Service objects are the fundamental representation of applications, given src/dst ports and protocol
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
service_name:
description:
- name of the service
required: true
protocol:
description:
- protocol for the service, should be tcp or udp
required: true
port:
description:
- destination port
required: true
source_port:
description:
- source port
required: false
default: None
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
# Creates service for port 22
- name: create SSH service
panos_service:
ip_address: "192.168.1.1"
password: "admin"
service_name: "service-tcp-22"
protocol: "tcp"
port: "22"
'''
RETURN='''
# Default return values
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_SERVICE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/vsys/entry[@name='vsys1']" +\
"/service/entry[@name='%s']"
def service_exists(xapi, service_name):
xapi.get(_SERVICE_XPATH % service_name)
e = xapi.element_root.find('.//entry')
if e is None:
return False
return True
def add_service(xapi, module, service_name, protocol, port, source_port):
if service_exists(xapi, service_name):
return False
exml = ['<protocol>']
exml.append('<%s>' % protocol)
exml.append('<port>%s</port>' % port)
if source_port:
exml.append('<source-port>%s</source-port>' % source_port)
exml.append('</%s>' % protocol)
exml.append('</protocol>')
exml = ''.join(exml)
xapi.set(xpath=_SERVICE_XPATH % service_name, element=exml)
return True
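# Illustrative: for protocol='tcp', port='22' and no source_port, the
# element built above is '<protocol><tcp><port>22</port></tcp></protocol>'.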
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
service_name=dict(required=True),
protocol=dict(required=True, choices=['tcp', 'udp']),
port=dict(required=True),
source_port=dict(),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
service_name = module.params['service_name']
protocol = module.params['protocol']
port = module.params['port']
source_port = module.params['source_port']
commit = module.params['commit']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
try:
changed = add_service(xapi, module,
service_name,
protocol,
port,
source_port)
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
|
clownhood/mbok | refs/heads/master | contrib/bitrpc/bitrpc.py | 2348 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" |
msmbuilder/osprey | refs/heads/master | basesetup.py | 4 | from __future__ import print_function, absolute_import
import os
import sys
import imp
import json
import string
import shutil
import subprocess
import tempfile
from distutils.dep_util import newer_group
from distutils.core import Extension
from distutils.errors import DistutilsExecError, DistutilsSetupError
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars
from distutils.command.build_ext import build_ext as _build_ext
def check_dependencies(dependencies):
def module_exists(dep):
try:
imp.find_module(dep)
return True
except ImportError:
return False
for dep in dependencies:
if len(dep) == 1:
import_name, pkg_name = dep[0], dep[0]
elif len(dep) == 2:
import_name, pkg_name = dep
else:
raise ValueError(dep)
if not module_exists(import_name):
lines = [
'-' * 50,
'Warning: This package requires %r. Try' % import_name,
'',
' $ conda install %s' % pkg_name,
'',
'or:',
'',
' $ pip install %s' % pkg_name,
'-' * 50,
]
print(os.linesep.join(lines), file=sys.stderr)
class CompilerDetection(object):
# Necessary for OSX. See https://github.com/mdtraj/mdtraj/issues/576
# The problem is that distutils.sysconfig.customize_compiler()
# is necessary to properly invoke the correct compiler for this class
# (otherwise the CC env variable isn't respected). Unfortunately,
# distutils.sysconfig.customize_compiler() DIES on OSX unless some
# appropriate initialization routines have been called. This line
# has a side effect of calling those initialization routines, and is therefore
# necessary for OSX, even though we don't use the result.
_DONT_REMOVE_ME = get_config_vars()
def __init__(self, disable_openmp):
cc = new_compiler()
customize_compiler(cc)
self.msvc = cc.compiler_type == 'msvc'
self._print_compiler_version(cc)
if disable_openmp:
self.openmp_enabled = False
else:
self.openmp_enabled, openmp_needs_gomp = self._detect_openmp()
self.sse3_enabled = self._detect_sse3() if not self.msvc else True
self.sse41_enabled = self._detect_sse41() if not self.msvc else True
self.compiler_args_sse2 = ['-msse2'] if not self.msvc else ['/arch:SSE2']
self.compiler_args_sse3 = ['-mssse3'] if (self.sse3_enabled and not self.msvc) else []
self.compiler_args_sse41, self.define_macros_sse41 = [], []
if self.sse41_enabled:
self.define_macros_sse41 = [('__SSE4__', 1), ('__SSE4_1__', 1)]
if not self.msvc:
self.compiler_args_sse41 = ['-msse4']
if self.openmp_enabled:
self.compiler_libraries_openmp = []
if self.msvc:
self.compiler_args_openmp = ['/openmp']
else:
self.compiler_args_openmp = ['-fopenmp']
if openmp_needs_gomp:
self.compiler_libraries_openmp = ['gomp']
else:
self.compiler_libraries_openmp = []
self.compiler_args_openmp = []
if self.msvc:
self.compiler_args_opt = ['/O2']
else:
self.compiler_args_opt = ['-O3', '-funroll-loops']
print()
def _print_compiler_version(self, cc):
print("C compiler:")
try:
if self.msvc:
if not cc.initialized:
cc.initialize()
cc.spawn([cc.cc])
else:
cc.spawn([cc.compiler[0]] + ['-v'])
except DistutilsExecError:
pass
def hasfunction(self, funcname, include=None, libraries=None, extra_postargs=None):
# running in a separate subshell lets us prevent unwanted stdout/stderr
part1 = '''
from __future__ import print_function
import os
import json
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars
FUNCNAME = json.loads('%(funcname)s')
INCLUDE = json.loads('%(include)s')
LIBRARIES = json.loads('%(libraries)s')
EXTRA_POSTARGS = json.loads('%(extra_postargs)s')
''' % {
'funcname': json.dumps(funcname),
'include': json.dumps(include),
'libraries': json.dumps(libraries or []),
'extra_postargs': json.dumps(extra_postargs)}
part2 = '''
get_config_vars() # DON'T REMOVE ME
cc = new_compiler()
customize_compiler(cc)
for library in LIBRARIES:
cc.add_library(library)
status = 0
try:
with open('func.c', 'w') as f:
if INCLUDE is not None:
f.write('#include %s\\n' % INCLUDE)
f.write('int main(void) {\\n')
f.write(' %s;\\n' % FUNCNAME)
f.write('}\\n')
objects = cc.compile(['func.c'], output_dir='.',
extra_postargs=EXTRA_POSTARGS)
cc.link_executable(objects, 'a.out')
except Exception as e:
status = 1
exit(status)
'''
tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
try:
curdir = os.path.abspath(os.curdir)
os.chdir(tmpdir)
with open('script.py', 'w') as f:
f.write(part1 + part2)
proc = subprocess.Popen(
[sys.executable, 'script.py'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate()
status = proc.wait()
finally:
os.chdir(curdir)
shutil.rmtree(tmpdir)
return status == 0
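# Illustrative usage (the probe is compiled in a throwaway temp dir):
#   self.hasfunction('printf("hi")', include='<stdio.h>')   # -> True/False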
def _print_support_start(self, feature):
print('Attempting to autodetect {0:6} support...'.format(feature), end=' ')
def _print_support_end(self, feature, status):
if status is True:
print('Compiler supports {0}'.format(feature))
else:
print('Did not detect {0} support'.format(feature))
def _detect_openmp(self):
self._print_support_start('OpenMP')
hasopenmp = self.hasfunction('omp_get_num_threads()', extra_postargs=['-fopenmp', '/openmp'])
needs_gomp = hasopenmp
if not hasopenmp:
hasopenmp = self.hasfunction('omp_get_num_threads()', libraries=['gomp'])
needs_gomp = hasopenmp
self._print_support_end('OpenMP', hasopenmp)
return hasopenmp, needs_gomp
def _detect_sse3(self):
"Does this compiler support SSE3 intrinsics?"
self._print_support_start('SSE3')
result = self.hasfunction('__m128 v; _mm_hadd_ps(v,v)',
include='<pmmintrin.h>',
extra_postargs=['-msse3'])
self._print_support_end('SSE3', result)
return result
def _detect_sse41(self):
"Does this compiler support SSE4.1 intrinsics?"
self._print_support_start('SSE4.1')
result = self.hasfunction( '__m128 v; _mm_round_ps(v,0x00)',
include='<smmintrin.h>',
extra_postargs=['-msse4'])
self._print_support_end('SSE4.1', result)
return result
def git_version():
# Return the git revision as a string
# copied from numpy setup.py
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = 'Unknown'
return GIT_REVISION
def write_version_py(version, isreleased, filename):
cnt = """
# This file is generated in setup.py at build time.
version = '{version}'
short_version = '{short_version}'
full_version = '{full_version}'
git_revision = '{git_revision}'
release = {release}
"""
# git_revision
if os.path.exists('.git'):
git_revision = git_version()
else:
git_revision = 'Unknown'
# short_version, full_version
if isreleased:
full_version = version
short_version = version
else:
full_version = ("{version}+{git_revision}"
.format(version=version, git_revision=git_revision))
short_version = version
with open(filename, 'w') as f:
f.write(cnt.format(version=version,
short_version=short_version,
full_version=full_version,
git_revision=git_revision,
release=isreleased))
class StaticLibrary(Extension):
def __init__(self, *args, **kwargs):
self.export_include = kwargs.pop('export_include', [])
Extension.__init__(self, *args, **kwargs)
class build_ext(_build_ext):
def build_extension(self, ext):
if isinstance(ext, StaticLibrary):
self.build_static_extension(ext)
else:
_build_ext.build_extension(self, ext)
def build_static_extension(self, ext):
from distutils import log
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name)
sources = list(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
self._built_objects = objects[:]
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
language = ext.language or self.compiler.detect_language(sources)
libname = os.path.splitext(os.path.basename(ext_path))[0]
output_dir = os.path.dirname(ext_path)
if (self.compiler.static_lib_format.startswith('lib') and
libname.startswith('lib')):
libname = libname[3:]
if not os.path.exists(output_dir):
# necessary for windows
os.makedirs(output_dir)
self.compiler.create_static_lib(objects,
output_libname=libname,
output_dir=output_dir,
target_lang=language)
for item in ext.export_include:
shutil.copy(item, output_dir)
|
junmin-zhu/chromium-rivertrail | refs/heads/v8-binding | third_party/mesa/MesaLib/src/gallium/tests/python/samples/gs.py | 32 | #!/usr/bin/env python
##########################################################################
#
# Copyright 2009 VMware
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
##########################################################################
from gallium import *
def make_image(surface):
data = surface.get_tile_rgba8(0, 0, surface.width, surface.height)
import Image
outimage = Image.fromstring('RGBA', (surface.width, surface.height), data, "raw", 'RGBA', 0, 1)
return outimage
def save_image(filename, surface):
outimage = make_image(surface)
outimage.save(filename, "PNG")
def show_image(surface):
outimage = make_image(surface)
import Tkinter as tk
from PIL import Image, ImageTk
root = tk.Tk()
root.title('background image')
image1 = ImageTk.PhotoImage(outimage)
w = image1.width()
h = image1.height()
x = 100
y = 100
root.geometry("%dx%d+%d+%d" % (w, h, x, y))
panel1 = tk.Label(root, image=image1)
panel1.pack(side='top', fill='both', expand='yes')
panel1.image = image1
root.mainloop()
def test(dev):
ctx = dev.context_create()
width = 255
height = 255
minz = 0.0
maxz = 1.0
# disabled blending/masking
blend = Blend()
blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE
blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE
blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO
blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO
blend.rt[0].colormask = PIPE_MASK_RGBA
ctx.set_blend(blend)
# depth/stencil/alpha
depth_stencil_alpha = DepthStencilAlpha()
depth_stencil_alpha.depth.enabled = 1
depth_stencil_alpha.depth.writemask = 1
depth_stencil_alpha.depth.func = PIPE_FUNC_LESS
ctx.set_depth_stencil_alpha(depth_stencil_alpha)
# rasterizer
rasterizer = Rasterizer()
rasterizer.front_winding = PIPE_WINDING_CW
rasterizer.cull_mode = PIPE_WINDING_NONE
rasterizer.scissor = 1
ctx.set_rasterizer(rasterizer)
# viewport
viewport = Viewport()
scale = FloatArray(4)
scale[0] = width / 2.0
scale[1] = -height / 2.0
scale[2] = (maxz - minz) / 2.0
scale[3] = 1.0
viewport.scale = scale
translate = FloatArray(4)
translate[0] = width / 2.0
translate[1] = height / 2.0
translate[2] = (maxz - minz) / 2.0
translate[3] = 0.0
viewport.translate = translate
ctx.set_viewport(viewport)
# samplers
sampler = Sampler()
sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE
sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE
sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE
sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE
sampler.min_img_filter = PIPE_TEX_MIPFILTER_NEAREST
sampler.mag_img_filter = PIPE_TEX_MIPFILTER_NEAREST
sampler.normalized_coords = 1
ctx.set_sampler(0, sampler)
# scissor
scissor = Scissor()
scissor.minx = 0
scissor.miny = 0
scissor.maxx = width
scissor.maxy = height
ctx.set_scissor(scissor)
clip = Clip()
clip.nr = 0
ctx.set_clip(clip)
# framebuffer
cbuf = dev.resource_create(
PIPE_FORMAT_B8G8R8X8_UNORM,
width, height,
bind=PIPE_BIND_RENDER_TARGET,
).get_surface()
zbuf = dev.resource_create(
PIPE_FORMAT_Z32_UNORM,
width, height,
bind=PIPE_BIND_DEPTH_STENCIL,
).get_surface()
fb = Framebuffer()
fb.width = width
fb.height = height
fb.nr_cbufs = 1
fb.set_cbuf(0, cbuf)
fb.set_zsbuf(zbuf)
ctx.set_framebuffer(fb)
rgba = FloatArray(4);
rgba[0] = 0.0
rgba[1] = 0.0
rgba[2] = 0.0
rgba[3] = 0.0
ctx.clear(PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTHSTENCIL, rgba, 1.0, 0xff)
# vertex shader
vs = Shader('''
VERT
DCL IN[0], POSITION, CONSTANT
DCL IN[1], COLOR, CONSTANT
DCL OUT[0], POSITION, CONSTANT
DCL OUT[1], COLOR, CONSTANT
0:MOV OUT[0], IN[0]
1:MOV OUT[1], IN[1]
2:END
''')
ctx.set_vertex_shader(vs)
gs = Shader('''
GEOM
PROPERTY GS_INPUT_PRIMITIVE TRIANGLES
PROPERTY GS_OUTPUT_PRIMITIVE TRIANGLE_STRIP
DCL IN[][0], POSITION, CONSTANT
DCL IN[][1], COLOR, CONSTANT
DCL OUT[0], POSITION, CONSTANT
DCL OUT[1], COLOR, CONSTANT
0:MOV OUT[0], IN[0][0]
1:MOV OUT[1], IN[0][1]
2:EMIT
3:MOV OUT[0], IN[1][0]
4:MOV OUT[1], IN[1][1]
5:EMIT
6:MOV OUT[0], IN[2][0]
7:MOV OUT[1], IN[2][1]
8:EMIT
9:ENDPRIM
10:END
''')
ctx.set_geometry_shader(gs)
# fragment shader
fs = Shader('''
FRAG
DCL IN[0], COLOR, LINEAR
DCL OUT[0], COLOR, CONSTANT
0:MOV OUT[0], IN[0]
1:END
''')
ctx.set_fragment_shader(fs)
nverts = 3
nattrs = 2
verts = FloatArray(nverts * nattrs * 4)
verts[ 0] = 0.0 # x1
verts[ 1] = 0.8 # y1
verts[ 2] = 0.2 # z1
verts[ 3] = 1.0 # w1
verts[ 4] = 1.0 # r1
verts[ 5] = 0.0 # g1
verts[ 6] = 0.0 # b1
verts[ 7] = 1.0 # a1
verts[ 8] = -0.8 # x2
verts[ 9] = -0.8 # y2
verts[10] = 0.5 # z2
verts[11] = 1.0 # w2
verts[12] = 0.0 # r2
verts[13] = 1.0 # g2
verts[14] = 0.0 # b2
verts[15] = 1.0 # a2
verts[16] = 0.8 # x3
verts[17] = -0.8 # y3
verts[18] = 0.8 # z3
verts[19] = 1.0 # w3
verts[20] = 0.0 # r3
verts[21] = 0.0 # g3
verts[22] = 1.0 # b3
verts[23] = 1.0 # a3
ctx.draw_vertices(PIPE_PRIM_TRIANGLES,
nverts,
nattrs,
verts)
ctx.flush()
show_image(cbuf)
#show_image(zbuf)
#save_image('cbuf.png', cbuf)
#save_image('zbuf.png', zbuf)
def main():
dev = Device()
test(dev)
if __name__ == '__main__':
main()
|
cfig/Android_boot_image_editor | refs/heads/master | tools/free.py | 2 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
import subprocess
def run(cmd):
print(cmd)
subprocess.check_call(cmd, shell = True)
run("touch vbmeta.img")
run("gradle pull")
run("gradle unpack")
run('vim -u NONE -N build/unzip_boot/vbmeta.avb.json -c ":19s/0/2/g" -c ":wq"')
run("gradle pack")
run("gradle flash")
|
eptmp3/Sick-Beard | refs/heads/development | lib/imdb/Character.py | 128 | """
Character module (imdb package).
This module provides the Character class, used to store information about
a given character.
Copyright 2007-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_name, build_name, flatten, _Container, cmpPeople
class Character(_Container):
"""A Character.
Every information about a character can be accessed as:
characterObject['information']
to get a list of the kind of information stored in a
Character object, use the keys() method; some useful aliases
are defined (as "also known as" for the "akas" key);
see the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'filmography', 'biography')
# Aliases for some not-so-intuitive keys.
keys_alias = {'mini biography': 'biography',
'bio': 'biography',
'character biography': 'biography',
'character biographies': 'biography',
'biographies': 'biography',
'character bio': 'biography',
'aka': 'akas',
'also known as': 'akas',
'alternate names': 'akas',
'personal quotes': 'quotes',
'keys': 'keywords',
'keyword': 'keywords'}
keys_tomodify_list = ('biography', 'quotes')
cmpFunct = cmpPeople
def _init(self, **kwds):
"""Initialize a Character object.
*characterID* -- the unique identifier for the character.
*name* -- the name of the Character, if not in the data dictionary.
*myName* -- the nickname you use for this character.
*myID* -- your personal id for this character.
*data* -- a dictionary used to initialize the object.
*notes* -- notes about the given character.
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to characters.
*modFunct* -- function called returning text fields.
"""
name = kwds.get('name')
if name and not self.data.has_key('name'):
self.set_name(name)
self.characterID = kwds.get('characterID', None)
self.myName = kwds.get('myName', u'')
def _reset(self):
"""Reset the Character object."""
self.characterID = None
self.myName = u''
def set_name(self, name):
"""Set the name of the character."""
# XXX: convert name to unicode, if it's a plain string?
try:
d = analyze_name(name, canonical=0)
self.data.update(d)
except:
# TODO: catch only IMDbPYParserError and issue a warning.
pass
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if self.data.has_key('name'):
addkeys += ['long imdb name']
if self.data.has_key('headshot'):
addkeys += ['full-size headshot']
return addkeys
def _getitem(self, key):
"""Handle special keys."""
## XXX: can a character have an imdbIndex?
if self.data.has_key('name'):
if key == 'long imdb name':
return build_name(self.data)
if key == 'full-size headshot' and self.data.has_key('headshot'):
return self._re_fullsizeURL.sub('', self.data.get('headshot', ''))
return None
def getID(self):
"""Return the characterID."""
return self.characterID
def __nonzero__(self):
"""The Character is "false" if the self.data does not contain a name."""
# XXX: check the name and the characterID?
if self.data.get('name'): return 1
return 0
def __contains__(self, item):
"""Return true if this Character was portrayed in the given Movie
or it was impersonated by the given Person."""
from Movie import Movie
from Person import Person
if isinstance(item, Person):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m.currentRole):
return 1
elif isinstance(item, Movie):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m):
return 1
return 0
def isSameName(self, other):
"""Return true if two character have the same name
and/or characterID."""
if not isinstance(other, self.__class__):
return 0
if self.data.has_key('name') and \
other.data.has_key('name') and \
build_name(self.data, canonical=0) == \
build_name(other.data, canonical=0):
return 1
if self.accessSystem == other.accessSystem and \
self.characterID is not None and \
self.characterID == other.characterID:
return 1
return 0
isSameCharacter = isSameName
def __deepcopy__(self, memo):
"""Return a deep copy of a Character instance."""
c = Character(name=u'', characterID=self.characterID,
myName=self.myName, myID=self.myID,
data=deepcopy(self.data, memo),
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
c.current_info = list(self.current_info)
c.set_mod_funct(self.modFunct)
return c
def __repr__(self):
"""String representation of a Character object."""
r = '<Character id:%s[%s] name:_%s_>' % (self.characterID,
self.accessSystem,
self.get('name'))
if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
return r
def __str__(self):
"""Simply print the short name."""
return self.get('name', u'').encode('utf_8', 'replace')
def __unicode__(self):
"""Simply print the short title."""
return self.get('name', u'')
def summary(self):
"""Return a string with a pretty-printed summary for the character."""
if not self: return u''
s = u'Character\n=====\nName: %s\n' % \
self.get('name', u'')
bio = self.get('biography')
if bio:
s += u'Biography: %s\n' % bio[0]
filmo = self.get('filmography')
if filmo:
a_list = [x.get('long imdb canonical title', u'')
for x in filmo[:5]]
s += u'Last movies with this character: %s.\n' % u'; '.join(a_list)
return s
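# A minimal usage sketch (the data below is hypothetical; the available keys
# depend on the access system that populated the object):
#
#   c = Character(name=u'Jesse James', characterID='0000001')
#   c['name']                    # -> u'Jesse James'
#   c['bio']                     # alias resolved to 'biography' via keys_alias
#   'long imdb name' in c.keys() # True, provided by _additional_keys()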
|
NeCTAR-RC/nova | refs/heads/nectar/mitaka | nova/pci/stats.py | 7 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova.pci import utils
from nova.pci import whitelist
LOG = logging.getLogger(__name__)
class PciDeviceStats(object):
"""PCI devices summary information.
According to the PCI SR-IOV spec, a PCI physical function can have up to
256 PCI virtual functions, thus the number of assignable PCI functions in
a cloud can be big. The scheduler needs to know all device availability
information in order to determine which compute hosts can support a PCI
request. Passing individual virtual device information to the scheduler
does not scale, so we provide summary information.
Usually the virtual functions provided by a host PCI device have the same
value for most properties, like vendor_id, product_id and class type.
The PCI stats class summarizes this information for the scheduler.
The pci stats information is maintained exclusively by compute node
resource tracker and updated to database. The scheduler fetches the
information and selects the compute node accordingly. If a compute
node is selected, the resource tracker allocates the devices to the
instance and updates the pci stats information.
This summary information will be helpful for cloud management also.
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']
def __init__(self, stats=None):
super(PciDeviceStats, self).__init__()
# NOTE(sbauza): Stats are a PCIDevicePoolList object
self.pools = [pci_pool.to_dict()
for pci_pool in stats] if stats else []
self.pools.sort(key=lambda item: len(item))
def _equal_properties(self, dev, entry, matching_keys):
return all(dev.get(prop) == entry.get(prop)
for prop in matching_keys)
def _find_pool(self, dev_pool):
"""Return the first pool that matches dev."""
for pool in self.pools:
pool_keys = pool.copy()
del pool_keys['count']
del pool_keys['devices']
if (len(pool_keys.keys()) == len(dev_pool.keys()) and
self._equal_properties(dev_pool, pool_keys, dev_pool.keys())):
return pool
def _create_pool_keys_from_dev(self, dev):
"""create a stats pool dict that this dev is supposed to be part of
Note that this pool dict contains the stats pool's keys and their
values. 'count' and 'devices' are not included.
"""
# Don't add a device that doesn't have a matching device spec.
# This can happen during initial sync up with the controller
devspec = whitelist.get_pci_device_devspec(dev)
if not devspec:
return
tags = devspec.get_tags()
pool = {k: getattr(dev, k) for k in self.pool_keys}
if tags:
pool.update(tags)
return pool
def add_device(self, dev):
"""Add a device to its matching pool."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
dev_pool['count'] = 0
dev_pool['devices'] = []
self.pools.append(dev_pool)
self.pools.sort(key=lambda item: len(item))
pool = dev_pool
pool['count'] += 1
pool['devices'].append(dev)
@staticmethod
def _decrease_pool_count(pool_list, pool, count=1):
"""Decrement pool's size by count.
If pool becomes empty, remove pool from pool_list.
"""
if pool['count'] > count:
pool['count'] -= count
count = 0
else:
count -= pool['count']
pool_list.remove(pool)
return count
def remove_device(self, dev):
"""Remove one device from the first pool that it matches."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
raise exception.PciDevicePoolEmpty(
compute_node_id=dev.compute_node_id, address=dev.address)
pool['devices'].remove(dev)
self._decrease_pool_count(self.pools, pool)
def get_free_devs(self):
free_devs = []
for pool in self.pools:
free_devs.extend(pool['devices'])
return free_devs
def consume_requests(self, pci_requests, numa_cells=None):
alloc_devices = []
for request in pci_requests:
count = request.count
spec = request.spec
# For now, keep the same algorithm as during scheduling:
# a spec may be able to match multiple pools.
pools = self._filter_pools_for_spec(self.pools, spec)
if numa_cells:
pools = self._filter_pools_for_numa_cells(pools, numa_cells)
pools = self._filter_non_requested_pfs(request, pools)
# Failed to allocate the required number of devices
# Return the devices already allocated back to their pools
if sum([pool['count'] for pool in pools]) < count:
LOG.error(_LE("Failed to allocate PCI devices for instance."
" Unassigning devices back to pools."
" This should not happen, since the scheduler"
" should have accurate information, and allocation"
" during claims is controlled via a hold"
" on the compute node semaphore"))
for d in range(len(alloc_devices)):
self.add_device(alloc_devices.pop())
return None
for pool in pools:
if pool['count'] >= count:
num_alloc = count
else:
num_alloc = pool['count']
count -= num_alloc
pool['count'] -= num_alloc
for d in range(num_alloc):
pci_dev = pool['devices'].pop()
self._handle_device_dependents(pci_dev)
pci_dev.request_id = request.request_id
alloc_devices.append(pci_dev)
if count == 0:
break
return alloc_devices
def _handle_device_dependents(self, pci_dev):
"""Remove device dependents or a parent from pools.
        In case the device is a PF, all of its dependent VFs should
        be removed from the pool counts, if these are present.
        When the device is a VF, its parent PF's pool count should be
        decreased, unless the parent is no longer in a pool.
"""
if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF:
vfs_list = objects.PciDeviceList.get_by_parent_address(
pci_dev._context,
pci_dev.compute_node_id,
pci_dev.address)
if vfs_list:
for vf in vfs_list:
self.remove_device(vf)
elif pci_dev.dev_type == fields.PciDeviceType.SRIOV_VF:
try:
parent = pci_dev.get_by_dev_addr(pci_dev._context,
pci_dev.compute_node_id,
pci_dev.parent_addr)
# Make sure not to decrease PF pool count if this parent has
# been already removed from pools
if parent in self.get_free_devs():
self.remove_device(parent)
except exception.PciDeviceNotFound:
return
@staticmethod
def _filter_pools_for_spec(pools, request_specs):
return [pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)]
@staticmethod
def _filter_pools_for_numa_cells(pools, numa_cells):
        # Some systems don't report NUMA node info for PCI devices; in
        # that case None is reported in pci_device.numa_node. By adding
        # None to numa_cells we allow assigning those devices to instances
        # with a NUMA topology.
numa_cells = [None] + [cell.id for cell in numa_cells]
# filter out pools which numa_node is not included in numa_cells
return [pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}])
for cell in numa_cells)]
def _filter_non_requested_pfs(self, request, matching_pools):
        # Remove SRIOV_PFs from pools unless they have been explicitly
        # requested. This is especially needed in cases where PFs and VFs
        # have the same product_id.
if all(spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF for
spec in request.spec):
matching_pools = self._filter_pools_for_pfs(matching_pools)
return matching_pools
@staticmethod
def _filter_pools_for_pfs(pools):
return [pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF]
def _apply_request(self, pools, request, numa_cells=None):
        # NOTE(vladikr): This code may be open to race conditions.
        # Two concurrent calls to support_requests may both succeed, because
        # this method does not remove the related devices from the pools.
count = request.count
matching_pools = self._filter_pools_for_spec(pools, request.spec)
if numa_cells:
matching_pools = self._filter_pools_for_numa_cells(matching_pools,
numa_cells)
matching_pools = self._filter_non_requested_pfs(request,
matching_pools)
if sum([pool['count'] for pool in matching_pools]) < count:
return False
else:
for pool in matching_pools:
count = self._decrease_pool_count(pools, pool, count)
if not count:
break
return True
def support_requests(self, requests, numa_cells=None):
"""Check if the pci requests can be met.
Scheduler checks compute node's PCI stats to decide if an
instance can be scheduled into the node. Support does not
mean real allocation.
If numa_cells is provided then only devices contained in
those nodes are considered.
"""
        # note (yjiang5): this function has a high possibility of failing,
        # so no exception should be raised, for performance reasons.
pools = copy.deepcopy(self.pools)
return all([self._apply_request(pools, r, numa_cells)
for r in requests])
def apply_requests(self, requests, numa_cells=None):
"""Apply PCI requests to the PCI stats.
This is used in multiple instance creation, when the scheduler has to
maintain how the resources are consumed by the instances.
If numa_cells is provided then only devices contained in
those nodes are considered.
"""
if not all([self._apply_request(self.pools, r, numa_cells)
for r in requests]):
raise exception.PciDeviceRequestFailed(requests=requests)
def __iter__(self):
# 'devices' shouldn't be part of stats
pools = []
for pool in self.pools:
tmp = {k: v for k, v in six.iteritems(pool) if k != 'devices'}
pools.append(tmp)
return iter(pools)
def clear(self):
"""Clear all the stats maintained."""
self.pools = []
def __eq__(self, other):
return cmp(self.pools, other.pools) == 0
def __ne__(self, other):
return not (self == other)
def to_device_pools_obj(self):
"""Return the contents of the pools as a PciDevicePoolList object."""
stats = [x for x in self]
return pci_device_pool.from_pci_stats(stats)
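# A rough usage sketch (illustrative only; real PciDevice objects come from
# the nova object model, and add_device() needs a matching whitelist entry):
#
#   stats = PciDeviceStats()
#   for dev in discovered_pci_devices:   # hypothetical iterable of PciDevice
#       stats.add_device(dev)
#   if stats.support_requests(instance_pci_requests):
#       stats.apply_requests(instance_pci_requests)
#   pools = stats.to_device_pools_obj()  # PciDevicePoolList for the scheduler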
|
Vixionar/django | refs/heads/master | tests/template_tests/filter_tests/test_length.py | 521 | from django.template.defaultfilters import length
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LengthTests(SimpleTestCase):
@setup({'length01': '{{ list|length }}'})
def test_length01(self):
output = self.engine.render_to_string('length01', {'list': ['4', None, True, {}]})
self.assertEqual(output, '4')
@setup({'length02': '{{ list|length }}'})
def test_length02(self):
output = self.engine.render_to_string('length02', {'list': []})
self.assertEqual(output, '0')
@setup({'length03': '{{ string|length }}'})
def test_length03(self):
output = self.engine.render_to_string('length03', {'string': ''})
self.assertEqual(output, '0')
@setup({'length04': '{{ string|length }}'})
def test_length04(self):
output = self.engine.render_to_string('length04', {'string': 'django'})
self.assertEqual(output, '6')
@setup({'length05': '{% if string|length == 6 %}Pass{% endif %}'})
def test_length05(self):
output = self.engine.render_to_string('length05', {'string': mark_safe('django')})
self.assertEqual(output, 'Pass')
# Invalid uses that should fail silently.
@setup({'length06': '{{ int|length }}'})
def test_length06(self):
output = self.engine.render_to_string('length06', {'int': 7})
self.assertEqual(output, '0')
@setup({'length07': '{{ None|length }}'})
def test_length07(self):
output = self.engine.render_to_string('length07', {'None': None})
self.assertEqual(output, '0')
class FunctionTests(SimpleTestCase):
def test_string(self):
self.assertEqual(length('1234'), 4)
def test_safestring(self):
self.assertEqual(length(mark_safe('1234')), 4)
def test_list(self):
self.assertEqual(length([1, 2, 3, 4]), 4)
|
Nictec/nictec_website2.0 | refs/heads/master | nictecsite/storage/migrations/0007_assignment_type.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-16 06:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('storage', '0006_auto_20160911_1944'),
]
operations = [
migrations.AddField(
model_name='assignment',
name='Type',
field=models.CharField(choices=[('R', 'Rental'), ('FS', 'Full Service')], default='Rental', max_length=2),
),
]
|
acislab/HuMaIN_Microservices | refs/heads/master | OCRopyServices/RecognitionService/ocrolib/sl.py | 16 | ################################################################
### utilities for lists of slices, treating them like rectangles
################################################################
import numpy
from pylab import mean
### inquiry functions
def is_slices(u):
for s in u:
if type(s)!=slice: return False
return True
def dims(s):
"""List of dimensions of the slice list."""
return tuple([x.stop-x.start for x in s])
def dim(s,i):
"""Dimension of the slice list for index i."""
return s[i].stop-s[i].start
def dim0(s):
"""Dimension of the slice list for dimension 0."""
return s[0].stop-s[0].start
def dim1(s):
"""Dimension of the slice list for dimension 1."""
return s[1].stop-s[1].start
def raster(u):
"""Return (row0,row1,col0,col1)."""
return (u[0].start,u[0].stop,u[1].start,u[1].stop)
def box(r0,r1,c0,c1):
return (slice(r0,r1),slice(c0,c1))
def start(u):
return tuple([x.start for x in u])
def stop(u):
return tuple([x.stop for x in u])
def bounds(a):
"""Return a list of slices corresponding to the array bounds."""
return tuple([slice(0,a.shape[i]) for i in range(a.ndim)])
def volume(a):
"""Return the area of the slice list."""
return numpy.prod([max(x.stop-x.start,0) for x in a])
def empty(a):
"""Test whether the slice is empty."""
return a is None or volume(a)==0
def shift(u,offsets,scale=1):
u = list(u)
for i in range(len(offsets)):
u[i] = slice(u[i].start+scale*offsets[i],u[i].stop+scale*offsets[i])
return tuple(u)
### These are special because they only operate on the first two
### dimensions. That's useful for RGB images.
def area(a):
"""Return the area of the slice list (ignores anything past a[:2]."""
return numpy.prod([max(x.stop-x.start,0) for x in a[:2]])
def aspect(a):
return height(a)*1.0/width(a)
### Geometric operations.
def pad(u,d):
"""Pad the slice list by the given amount."""
return tuple([slice(u[i].start-d,u[i].stop+d) for i in range(len(u))])
def union(u,v):
"""Compute the union of the two slice lists."""
if u is None: return v
if v is None: return u
return tuple([slice(min(u[i].start,v[i].start),max(u[i].stop,v[i].stop)) for i in range(len(u))])
def intersect(u,v):
"""Compute the intersection of the two slice lists."""
if u is None: return v
if v is None: return u
return tuple([slice(max(u[i].start,v[i].start),min(u[i].stop,v[i].stop)) for i in range(len(u))])
def xoverlap(u,v):
return max(0,min(u[1].stop,v[1].stop)-max(u[1].start,v[1].start))
def yoverlap(u,v):
return max(0,min(u[0].stop,v[0].stop)-max(u[0].start,v[0].start))
def xoverlap_rel(u,v):
return xoverlap(u,v)*1.0/max(1,width(u),width(v))
def yoverlap_rel(u,v):
return yoverlap(u,v)*1.0/max(1,height(u),height(v))
def xoverlaps(u,v):
return u[1].stop>=v[1].start and v[1].stop>=u[1].start
def yoverlaps(u,v):
return u[0].stop>=v[0].start and v[0].stop>=u[0].start
def center1(s):
return mean([s[1].stop,s[1].start])
def center0(s):
return mean([s[0].stop,s[0].start])
def xcenter(s):
return mean([s[1].stop,s[1].start])
def ycenter(s):
return mean([s[0].stop,s[0].start])
def center(s):
return (ycenter(s),xcenter(s))
def center_in(u,v):
y,x = ycenter(u),xcenter(u)
return y>=v[0].start and y<=v[0].stop and x>=v[1].start and x<=v[1].stop
def width(s):
return s[1].stop-s[1].start
def height(s):
return s[0].stop-s[0].start
### Functions with mathematical coordinate conventions
def mbox(x0,y0,x1,y1,h):
return (slice(h-y1-1,h-y0-1),slice(x0,x1))
def raster_FIXME(u):
"""Return (y0,x0,y1,x1) for the given height."""
return (u[0].start,u[1].start,u[0].stop,u[1].stop)
def math(u,h):
"""Return (x0,y0,x1,y1) for the given height."""
return (u[1].start,h-u[0].stop-1,u[1].stop,h-u[0].start-1)
### Image-related
def extend_to(slices,image):
if image.ndim==len(slices):
return slices
slices = list(slices)
slices = slices + bounds(image)[len(slices):]
return tuple(slices)
def cut(image,box,margin=0,bg=0,dtype=None):
"""Cut out a region given by a box (row0,col0,row1,col1),
with an optional margin."""
assert len(box)==2 and is_slices(box)
if dtype is None: dtype = image.dtype
if image.ndim==3:
assert image.shape[2]==3
result = [cut(image[:,:,i],box,margin,bg,dtype) for i in range(image.shape[2])]
result = numpy.transpose(result,[1,2,0])
return result
elif image.ndim==2:
box = pad(box,margin)
cbox = intersect(box,bounds(image))
if empty(cbox):
result = numpy.empty(dims(box),dtype=dtype)
result.ravel()[:] = bg
return result
cimage = image[cbox]
if cbox==box:
return cimage
else:
if dtype is None: dtype = image.dtype
result = numpy.empty(dims(box),dtype=dtype)
result.ravel()[:] = bg
moved = shift(cbox,start(box),-1)
result[moved] = cimage
return result
else:
raise Exception("not implemented for ndim!=2 or ndim!=3")
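# Informal example: a pair of slices behaves like a rectangle here.
#
#   u = box(10, 20, 30, 50)  # rows 10..20, cols 30..50
#   dims(u)                  # -> (10, 20)
#   area(u)                  # -> 200
#   v = pad(u, 2)            # grow the box by 2 on every side
#   intersect(u, v) == u     # -> True, since padding only grows the box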
|
CYBAI/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/third_party/certifi/certifi/__init__.py | 59 | from .core import where, old_where
__version__ = "2018.04.16"
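# Example (sketch): where() returns the path of the bundled CA certificate
# file, suitable e.g. as the cafile argument of the standard ssl module:
#
#   import ssl
#   import certifi
#   ctx = ssl.create_default_context(cafile=certifi.where())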
|
density215/d215-miniblog | refs/heads/master | django/contrib/webdesign/models.py | 12133432 | |
netfirms/erpnext | refs/heads/develop | erpnext/stock/report/stock_ageing/__init__.py | 12133432 | |
openiitbombayx/edx-platform | refs/heads/master | common/djangoapps/track/tests/test_util.py | 239 | from datetime import datetime
import json
from pytz import UTC
from django.test import TestCase
from track.utils import DateTimeJSONEncoder
class TestDateTimeJSONEncoder(TestCase):
def test_datetime_encoding(self):
a_naive_datetime = datetime(2012, 05, 01, 07, 27, 10, 20000)
a_tz_datetime = datetime(2012, 05, 01, 07, 27, 10, 20000, tzinfo=UTC)
a_date = a_naive_datetime.date()
an_iso_datetime = '2012-05-01T07:27:10.020000+00:00'
an_iso_date = '2012-05-01'
obj = {
'number': 100,
'string': 'hello',
'object': {'a': 1},
'a_datetime': a_naive_datetime,
'a_tz_datetime': a_tz_datetime,
'a_date': a_date,
}
to_json = json.dumps(obj, cls=DateTimeJSONEncoder)
from_json = json.loads(to_json)
self.assertEqual(from_json['number'], 100)
self.assertEqual(from_json['string'], 'hello')
self.assertEqual(from_json['object'], {'a': 1})
self.assertEqual(from_json['a_datetime'], an_iso_datetime)
self.assertEqual(from_json['a_tz_datetime'], an_iso_datetime)
self.assertEqual(from_json['a_date'], an_iso_date)
|
gregswift/ansible | refs/heads/devel | lib/ansible/compat/six/__init__.py | 82 | # (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat six library. RHEL7 has python-six 1.3.0 which is too old
'''
# The following makes it easier for us to script updates of the bundled code
_BUNDLED_METADATA = { "pypi_name": "six", "version": "1.10.0" }
import os.path
try:
import six as _system_six
except ImportError:
_system_six = None
if _system_six:
# If we need some things from even newer versions of six, then we need to
# use our bundled copy instead
if ( # Added in six-1.8.0
not hasattr(_system_six.moves, 'shlex_quote') or
# Added in six-1.4.0
not hasattr(_system_six, 'byte2int') or
not hasattr(_system_six, 'add_metaclass') or
not hasattr(_system_six.moves, 'urllib')
):
_system_six = False
if _system_six:
six = _system_six
else:
from . import _six as six
six_py_file = '{0}.py'.format(os.path.splitext(six.__file__)[0])
exec(open(six_py_file, 'rb').read())
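# Either way, six's names end up in this module's namespace, so downstream
# code can import them from here without caring which copy was selected, e.g.:
#
#   from ansible.compat.six import iteritems, string_types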
|
mgilson/html5lib-python | refs/heads/master | html5lib/treebuilders/__init__.py | 1730 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
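# Example (sketch): obtain a builder class and hand it to the parser.
#
#   import html5lib
#   from html5lib.treebuilders import getTreeBuilder
#   TreeBuilder = getTreeBuilder("etree")
#   parser = html5lib.HTMLParser(tree=TreeBuilder)
#   document = parser.parse("<p>Hello</p>")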
|
wzbozon/statsmodels | refs/heads/master | statsmodels/examples/ex_nearest_corr.py | 34 | # -*- coding: utf-8 -*-
"""Find near positive definite correlation and covariance matrices
Created on Sun Aug 19 15:25:07 2012
Author: Josef Perktold
TODO:
add examples for cov_nearest from script log
Notes
-----
We look at the eigenvalues before and after the conversion to a psd matrix.
As a distance measure for how close the converted matrix is to the original,
we use the sum of squared differences (the Frobenius norm without taking the
square root).
"""
from __future__ import print_function
import numpy as np
from statsmodels.stats.correlation_tools import (
corr_nearest, corr_clipped, cov_nearest)
examples = ['all']
if 'all' in examples:
# x0 is positive definite
x0 = np.array([[1, -0.2, -0.9], [-0.2, 1, -0.2], [-0.9, -0.2, 1]])
# x has negative eigenvalues, not definite
x = np.array([[1, -0.9, -0.9], [-0.9, 1, -0.9], [-0.9, -0.9, 1]])
#x = np.array([[1, 0.2, 0.2], [0.2, 1, 0.2], [0.2, 0.2, 1]])
n_fact = 2
print('evals original', np.linalg.eigvalsh(x))
y = corr_nearest(x, n_fact=100)
print('evals nearest', np.linalg.eigvalsh(y))
print(y)
y = corr_nearest(x, n_fact=100, threshold=1e-16)
print('evals nearest', np.linalg.eigvalsh(y))
print(y)
y = corr_clipped(x, threshold=1e-16)
print('evals clipped', np.linalg.eigvalsh(y))
print(y)
np.set_printoptions(precision=4)
print('\nMini Monte Carlo')
# we are simulating a uniformly distributed symmetric matrix
# and find close positive definite matrix
# original can be far away from positive definite,
# then original and converted matrices can be far apart in norm
# results are printed for visual inspection of different cases
k_vars = 5
diag_idx = np.arange(k_vars)
for ii in range(10):
print()
x = np.random.uniform(-1, 1, size=(k_vars, k_vars))
x = (x + x.T) * 0.5
x[diag_idx, diag_idx] = 1
#x_std = np.sqrt(np.diag(x))
#x = x / x_std / x_std[:,None]
print()
print(np.sort(np.linalg.eigvals(x)), 'original')
yn = corr_nearest(x, threshold=1e-12, n_fact=200)
print(np.sort(np.linalg.eigvals(yn)), ((yn - x)**2).sum(), 'nearest')
yc = corr_clipped(x, threshold=1e-12)
print(np.sort(np.linalg.eigvals(yc)), ((yc - x)**2).sum(), 'clipped')
import time
t0 = time.time()
for _ in range(100):
corr_nearest(x, threshold=1e-15, n_fact=100)
t1 = time.time()
for _ in range(1000):
corr_clipped(x, threshold=1e-15)
t2 = time.time()
print('\ntime (nearest, clipped):', t1 - t0, t2 - t1)
if 'all' in examples:
# example for test case against R
x2 = np.array([ 1, 0.477, 0.644, 0.478, 0.651, 0.826,
0.477, 1, 0.516, 0.233, 0.682, 0.75,
0.644, 0.516, 1, 0.599, 0.581, 0.742,
0.478, 0.233, 0.599, 1, 0.741, 0.8,
0.651, 0.682, 0.581, 0.741, 1, 0.798,
0.826, 0.75, 0.742, 0.8, 0.798, 1]).reshape(6,6)
y1 = corr_nearest(x2, threshold=1e-15, n_fact=200)
y2 = corr_clipped(x2, threshold=1e-15)
print('\nmatrix 2')
print(np.sort(np.linalg.eigvals(x2)), 'original')
print(np.sort(np.linalg.eigvals(y1)), ((y1 - x2)**2).sum(), 'nearest')
print(np.sort(np.linalg.eigvals(y1)), ((y2 - x2)**2).sum(), 'clipped')
|
wilsoncampusano/gae-boilerplate | refs/heads/master | bp_includes/external/babel/messages/extract.py | 54 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Basic infrastructure for extracting localizable messages from source files.
This module defines an extensible system for collecting localizable message
strings from a variety of sources. A native extractor for Python source files
is built in; extractors for other sources can be added using very simple plugins.
The main entry points into the extraction functionality are the functions
`extract_from_dir` and `extract_from_file`.
"""
import os
try:
set
except NameError:
from sets import Set as set
import sys
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
__all__ = ['extract', 'extract_from_dir', 'extract_from_file']
__docformat__ = 'restructuredtext en'
GROUP_NAME = 'babel.extractors'
DEFAULT_KEYWORDS = {
'_': None,
'gettext': None,
'ngettext': (1, 2),
'ugettext': None,
'ungettext': (1, 2),
'dgettext': (2,),
'dngettext': (2, 3),
'N_': None
}
DEFAULT_MAPPING = [('**.py', 'python')]
empty_msgid_warning = (
'%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
'returns the header entry with meta information, not the empty string.')
def _strip_comment_tags(comments, tags):
"""Helper function for `extract` that strips comment tags from strings
in a list of comment lines. This functions operates in-place.
"""
def _strip(line):
for tag in tags:
if line.startswith(tag):
return line[len(tag):].strip()
return line
comments[:] = map(_strip, comments)
def extract_from_dir(dirname=os.getcwd(), method_map=DEFAULT_MAPPING,
options_map=None, keywords=DEFAULT_KEYWORDS,
comment_tags=(), callback=None, strip_comment_tags=False):
"""Extract messages from any source files found in the given directory.
This function generates tuples of the form:
``(filename, lineno, message, comments)``
Which extraction method is used per file is determined by the `method_map`
parameter, which maps extended glob patterns to extraction method names.
For example, the following is the default mapping:
>>> method_map = [
... ('**.py', 'python')
... ]
This basically says that files with the filename extension ".py" at any
level inside the directory should be processed by the "python" extraction
method. Files that don't match any of the mapping patterns are ignored. See
the documentation of the `pathmatch` function for details on the pattern
syntax.
The following extended mapping would also use the "genshi" extraction
method on any file in "templates" subdirectory:
>>> method_map = [
... ('**/templates/**.*', 'genshi'),
... ('**.py', 'python')
... ]
The dictionary provided by the optional `options_map` parameter augments
these mappings. It uses extended glob patterns as keys, and the values are
dictionaries mapping options names to option values (both strings).
The glob patterns of the `options_map` do not necessarily need to be the
same as those used in the method mapping. For example, while all files in
the ``templates`` folders in an application may be Genshi templates, the
options for those files may differ based on extension:
>>> options_map = {
... '**/templates/**.txt': {
... 'template_class': 'genshi.template:TextTemplate',
... 'encoding': 'latin-1'
... },
... '**/templates/**.html': {
... 'include_attrs': ''
... }
... }
:param dirname: the path to the directory to extract messages from
:param method_map: a list of ``(pattern, method)`` tuples that maps of
extraction method names to extended glob patterns
:param options_map: a dictionary of additional options (optional)
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of tags of translator comments to search for
and include in the results
    :param callback: a function that is called for every file that messages are
                     extracted from, just before the extraction itself is
                     performed; the function is passed the filename, the name
                     of the extraction method and the options dictionary as
positional arguments, in that order
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
    :return: an iterator over ``(filename, lineno, message, comments)`` tuples
:rtype: ``iterator``
:see: `pathmatch`
"""
if options_map is None:
options_map = {}
absname = os.path.abspath(dirname)
for root, dirnames, filenames in os.walk(absname):
for subdir in dirnames:
if subdir.startswith('.') or subdir.startswith('_'):
dirnames.remove(subdir)
dirnames.sort()
filenames.sort()
for filename in filenames:
filename = relpath(
os.path.join(root, filename).replace(os.sep, '/'),
dirname
)
for pattern, method in method_map:
if pathmatch(pattern, filename):
filepath = os.path.join(absname, filename)
options = {}
for opattern, odict in options_map.items():
if pathmatch(opattern, filename):
options = odict
if callback:
callback(filename, method, options)
for lineno, message, comments in \
extract_from_file(method, filepath,
keywords=keywords,
comment_tags=comment_tags,
options=options,
strip_comment_tags=
strip_comment_tags):
yield filename, lineno, message, comments
break
def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS,
comment_tags=(), options=None, strip_comment_tags=False):
"""Extract messages from a specific file.
This function returns a list of tuples of the form:
        ``(lineno, message, comments)``
:param filename: the path to the file to extract messages from
    :param method: a string specifying the extraction method (e.g. "python")
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of translator tags to search for and include
in the results
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
:param options: a dictionary of additional options (optional)
:return: the list of extracted messages
:rtype: `list`
"""
fileobj = open(filename, 'U')
try:
return list(extract(method, fileobj, keywords, comment_tags, options,
strip_comment_tags))
finally:
fileobj.close()
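# Informal example: extract gettext calls from a single Python source file
# with the default keyword map (the path below is illustrative):
#
#   for lineno, message, comments in extract_from_file(
#           'python', 'myapp/views.py'):
#       print lineno, message, comments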
def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(),
options=None, strip_comment_tags=False):
"""Extract messages from the given file-like object using the specified
extraction method.
This function returns a list of tuples of the form:
``(lineno, message, comments)``
The implementation dispatches the actual extraction to plugins, based on the
value of the ``method`` parameter.
>>> source = '''# foo module
... def run(argv):
... print _('Hello, world!')
... '''
>>> from StringIO import StringIO
>>> for message in extract('python', StringIO(source)):
... print message
(3, u'Hello, world!', [])
    :param method: a string specifying the extraction method (e.g. "python");
if this is a simple name, the extraction function will be
looked up by entry point; if it is an explicit reference
to a function (of the form ``package.module:funcname`` or
``package.module.funcname``), the corresponding function
will be imported and used
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
:return: the list of extracted messages
:rtype: `list`
:raise ValueError: if the extraction method is not registered
"""
func = None
if ':' in method or '.' in method:
if ':' not in method:
lastdot = method.rfind('.')
module, attrname = method[:lastdot], method[lastdot + 1:]
else:
module, attrname = method.split(':', 1)
func = getattr(__import__(module, {}, {}, [attrname]), attrname)
else:
try:
from pkg_resources import working_set
except ImportError:
# pkg_resources is not available, so we resort to looking up the
# builtin extractors directly
builtin = {'ignore': extract_nothing, 'python': extract_python}
func = builtin.get(method)
else:
for entry_point in working_set.iter_entry_points(GROUP_NAME,
method):
func = entry_point.load(require=True)
break
if func is None:
raise ValueError('Unknown extraction method %r' % method)
results = func(fileobj, keywords.keys(), comment_tags,
options=options or {})
for lineno, funcname, messages, comments in results:
if funcname:
spec = keywords[funcname] or (1,)
else:
spec = (1,)
if not isinstance(messages, (list, tuple)):
messages = [messages]
if not messages:
continue
# Validate the messages against the keyword's specification
msgs = []
invalid = False
# last_index is 1 based like the keyword spec
last_index = len(messages)
for index in spec:
if last_index < index:
# Not enough arguments
invalid = True
break
message = messages[index - 1]
if message is None:
invalid = True
break
msgs.append(message)
if invalid:
continue
first_msg_index = spec[0] - 1
if not messages[first_msg_index]:
# An empty string msgid isn't valid, emit a warning
where = '%s:%i' % (hasattr(fileobj, 'name') and \
fileobj.name or '(unknown)', lineno)
print >> sys.stderr, empty_msgid_warning % where
continue
messages = tuple(msgs)
if len(messages) == 1:
messages = messages[0]
if strip_comment_tags:
_strip_comment_tags(comments, comment_tags)
yield lineno, messages, comments
def extract_nothing(fileobj, keywords, comment_tags, options):
"""Pseudo extractor that does not actually extract anything, but simply
returns an empty list.
"""
return []
def extract_python(fileobj, keywords, comment_tags, options):
"""Extract messages from Python source code.
:param fileobj: the seekable, file-like object the messages should be
extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
funcname = lineno = message_lineno = None
call_stack = -1
buf = []
messages = []
translator_comments = []
in_def = in_translator_comments = False
comment_tag = None
encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
tokens = generate_tokens(fileobj.readline)
for tok, value, (lineno, _), _, _ in tokens:
if call_stack == -1 and tok == NAME and value in ('def', 'class'):
in_def = True
elif tok == OP and value == '(':
if in_def:
# Avoid false positives for declarations such as:
# def gettext(arg='message'):
in_def = False
continue
if funcname:
message_lineno = lineno
call_stack += 1
elif in_def and tok == OP and value == ':':
# End of a class definition without parens
in_def = False
continue
elif call_stack == -1 and tok == COMMENT:
# Strip the comment token from the line
value = value.decode(encoding)[1:].strip()
if in_translator_comments and \
translator_comments[-1][0] == lineno - 1:
# We're already inside a translator comment, continue appending
translator_comments.append((lineno, value))
continue
# If execution reaches this point, let's see if comment line
# starts with one of the comment tags
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.append((lineno, value))
break
elif funcname and call_stack == 0:
if tok == OP and value == ')':
if buf:
messages.append(''.join(buf))
del buf[:]
else:
messages.append(None)
if len(messages) > 1:
messages = tuple(messages)
else:
messages = messages[0]
                # Comments don't apply unless they immediately precede the
                # message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
funcname = lineno = message_lineno = None
call_stack = -1
messages = []
translator_comments = []
in_translator_comments = False
elif tok == STRING:
# Unwrap quotes in a safe manner, maintaining the string's
# encoding
# https://sourceforge.net/tracker/?func=detail&atid=355470&
# aid=617979&group_id=5470
value = eval('# coding=%s\n%s' % (encoding, value),
{'__builtins__':{}}, {})
if isinstance(value, str):
value = value.decode(encoding)
buf.append(value)
elif tok == OP and value == ',':
if buf:
messages.append(''.join(buf))
del buf[:]
else:
messages.append(None)
if translator_comments:
                    # We have translator comments, and since we're on a
                    # comma (,) the user is allowed to break onto a new line.
                    # Let's increase the last comment's lineno in order
                    # for the comment to still be a valid one.
old_lineno, old_comment = translator_comments.pop()
translator_comments.append((old_lineno+1, old_comment))
elif call_stack > 0 and tok == OP and value == ')':
call_stack -= 1
elif funcname and call_stack == -1:
funcname = None
elif tok == NAME and value in keywords:
funcname = value
def extract_javascript(fileobj, keywords, comment_tags, options):
"""Extract messages from JavaScript source code.
:param fileobj: the seekable, file-like object the messages should be
extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
from babel.messages.jslexer import tokenize, unquote_string
funcname = message_lineno = None
messages = []
last_argument = None
translator_comments = []
concatenate_next = False
encoding = options.get('encoding', 'utf-8')
last_token = None
call_stack = -1
for token in tokenize(fileobj.read().decode(encoding)):
if token.type == 'operator' and token.value == '(':
if funcname:
message_lineno = token.lineno
call_stack += 1
elif call_stack == -1 and token.type == 'linecomment':
value = token.value[2:].strip()
if translator_comments and \
translator_comments[-1][0] == token.lineno - 1:
translator_comments.append((token.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
translator_comments.append((token.lineno, value.strip()))
break
elif token.type == 'multilinecomment':
            # only one multi-line comment may precede a translation
translator_comments = []
value = token.value[2:-2].strip()
for comment_tag in comment_tags:
if value.startswith(comment_tag):
lines = value.splitlines()
if lines:
lines[0] = lines[0].strip()
lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
for offset, line in enumerate(lines):
translator_comments.append((token.lineno + offset,
line))
break
elif funcname and call_stack == 0:
if token.type == 'operator' and token.value == ')':
if last_argument is not None:
messages.append(last_argument)
if len(messages) > 1:
messages = tuple(messages)
elif messages:
messages = messages[0]
else:
messages = None
# Comments don't apply unless they immediately precede the
# message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
if messages is not None:
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
funcname = message_lineno = last_argument = None
concatenate_next = False
translator_comments = []
messages = []
call_stack = -1
elif token.type == 'string':
new_value = unquote_string(token.value)
if concatenate_next:
last_argument = (last_argument or '') + new_value
concatenate_next = False
else:
last_argument = new_value
elif token.type == 'operator':
if token.value == ',':
if last_argument is not None:
messages.append(last_argument)
last_argument = None
else:
messages.append(None)
concatenate_next = False
elif token.value == '+':
concatenate_next = True
elif call_stack > 0 and token.type == 'operator' \
and token.value == ')':
call_stack -= 1
elif funcname and call_stack == -1:
funcname = None
elif call_stack == -1 and token.type == 'name' and \
token.value in keywords and \
(last_token is None or last_token.type != 'name' or
last_token.value != 'function'):
funcname = token.value
last_token = token
|
hsaputra/tensorflow | refs/heads/master | tensorflow/python/debug/wrappers/local_cli_wrapper.py | 5 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger Wrapper Session Consisting of a Local Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shutil
import sys
import tempfile
# Google-internal import(s).
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import profile_analyzer_cli
from tensorflow.python.debug.cli import stepper_cli
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import common
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
_DUMP_ROOT_PREFIX = "tfdbg_"
class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession):
"""Concrete subclass of BaseDebugWrapperSession implementing a local CLI.
This class has all the methods that a `session.Session` object has, in order
to support debugging with minimal code changes. Invoking its `run()` method
will launch the command-line interface (CLI) of tfdbg.
"""
def __init__(self,
sess,
dump_root=None,
log_usage=True,
ui_type="curses",
thread_name_filter=None):
"""Constructor of LocalCLIDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards. If `None`, the debug dumps will
be at tfdbg_<random_string> under the system temp directory.
log_usage: (`bool`) whether the usage of this class is to be logged.
ui_type: (`str`) requested UI type. Currently supported:
(curses | readline)
thread_name_filter: Regular-expression white list for thread name. See
the doc of `BaseDebugWrapperSession` for details.
Raises:
ValueError: If dump_root is an existing and non-empty directory or if
dump_root is a file.
"""
if log_usage:
pass # No logging for open-source.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
if not dump_root:
self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX)
else:
if os.path.isfile(dump_root):
raise ValueError("dump_root path points to a file: %s" % dump_root)
elif os.path.isdir(dump_root) and os.listdir(dump_root):
raise ValueError("dump_root path points to a non-empty directory: %s" %
dump_root)
self._dump_root = dump_root
self._initialize_argparsers()
# Registered tensor filters.
self._tensor_filters = {}
# Register frequently-used filter(s).
self.add_tensor_filter("has_inf_or_nan", debug_data.has_inf_or_nan)
# Below are the state variables of this wrapper object.
# _active_tensor_filter: what (if any) tensor filter is in effect. If such
# a filter is in effect, this object will call run() method of the
# underlying TensorFlow Session object until the filter passes. This is
# activated by the "-f" flag of the "run" command.
# _run_through_times: keeps track of how many times the wrapper needs to
# run through without stopping at the run-end CLI. It is activated by the
# "-t" option of the "run" command.
# _skip_debug: keeps track of whether the current run should be executed
# without debugging. It is activated by the "-n" option of the "run"
# command.
#
# _run_start_response: keeps track what OnRunStartResponse the wrapper
# should return at the next run-start callback. If this information is
# unavailable (i.e., is None), the run-start CLI will be launched to ask
# the user. This is the case, e.g., right before the first run starts.
self._active_tensor_filter = None
self._active_tensor_filter_run_start_response = None
self._run_through_times = 1
self._skip_debug = False
self._run_start_response = None
self._is_run_start = True
self._ui_type = ui_type
def _initialize_argparsers(self):
self._argparsers = {}
ap = argparse.ArgumentParser(
description="Run through, with or without debug tensor watching.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-t",
"--times",
dest="times",
type=int,
default=1,
help="How many Session.run() calls to proceed with.")
ap.add_argument(
"-n",
"--no_debug",
dest="no_debug",
action="store_true",
help="Run through without debug tensor watching.")
ap.add_argument(
"-f",
"--till_filter_pass",
dest="till_filter_pass",
type=str,
default="",
help="Run until a tensor in the graph passes the specified filter.")
ap.add_argument(
"--node_name_filter",
dest="node_name_filter",
type=str,
default="",
help="Regular-expression filter for node names to be watched in the "
"run, e.g., loss, reshape.*")
ap.add_argument(
"--op_type_filter",
dest="op_type_filter",
type=str,
default="",
help="Regular-expression filter for op type to be watched in the run, "
"e.g., (MatMul|Add), Variable.*")
ap.add_argument(
"--tensor_dtype_filter",
dest="tensor_dtype_filter",
type=str,
default="",
help="Regular-expression filter for tensor dtype to be watched in the "
"run, e.g., (float32|float64), int.*")
ap.add_argument(
"-p",
"--profile",
dest="profile",
action="store_true",
help="Run and profile TensorFlow graph execution.")
self._argparsers["run"] = ap
ap = argparse.ArgumentParser(
description="Invoke stepper (cont, step, breakpoint, etc.)",
usage=argparse.SUPPRESS)
self._argparsers["invoke_stepper"] = ap
ap = argparse.ArgumentParser(
description="Display information about this Session.run() call.",
usage=argparse.SUPPRESS)
self._argparsers["run_info"] = ap
self._argparsers["print_feed"] = command_parser.get_print_tensor_argparser(
"Print the value of a feed in feed_dict.")
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
Args:
filter_name: (`str`) name of the filter.
tensor_filter: (`callable`) the filter callable. See the doc string of
`DebugDumpDir.find()` for more details about its signature.
"""
self._tensor_filters[filter_name] = tensor_filter
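    # Example (sketch): a custom filter uses the same (datum, tensor)
    # signature as debug_data.has_inf_or_nan; the names below are
    # illustrative.
    #
    #   def has_big_values(datum, tensor):
    #       return (tensor > 1e3).any()
    #   sess.add_tensor_filter("has_big_values", has_big_values)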
def on_session_init(self, request):
"""Overrides on-session-init callback.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
"""
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Overrides on-run-start callback.
Invoke the CLI to let user choose what action to take:
`run` / `invoke_stepper`.
Args:
request: An instance of `OnRunStartRequest`.
Returns:
An instance of `OnRunStartResponse`.
"""
self._is_run_start = True
self._update_run_calls_state(
request.run_call_count, request.fetches, request.feed_dict,
is_callable_runner=request.is_callable_runner)
if self._active_tensor_filter:
# If we are running until a filter passes, we just need to keep running
# with the previous `OnRunStartResponse`.
return self._active_tensor_filter_run_start_response
self._exit_if_requested_by_user()
if self._run_call_count > 1 and not self._skip_debug:
if self._run_through_times > 0:
# Just run through without debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.NON_DEBUG_RUN, [])
elif self._run_through_times == 0:
# It is the run at which the run-end CLI will be launched: activate
# debugging.
return (self._run_start_response or
framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls()))
if self._run_start_response is None:
self._prep_cli_for_run_start()
self._run_start_response = self._launch_cli()
if self._active_tensor_filter:
self._active_tensor_filter_run_start_response = self._run_start_response
if self._run_through_times > 1:
self._run_through_times -= 1
self._exit_if_requested_by_user()
return self._run_start_response
def _exit_if_requested_by_user(self):
if self._run_start_response == debugger_cli_common.EXPLICIT_USER_EXIT:
# Explicit user "exit" command leads to sys.exit(1).
print(
"Note: user exited from debugger CLI: Calling sys.exit(1).",
file=sys.stderr)
sys.exit(1)
def _prep_cli_for_run_start(self):
"""Prepare (but not launch) the CLI for run-start."""
self._run_cli = ui_factory.get_ui(self._ui_type)
help_intro = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
# Show logo at the onset of the first run.
help_intro.extend(cli_shared.get_tfdbg_logo())
help_intro.extend(debugger_cli_common.RichTextLines("Upcoming run:"))
help_intro.extend(self._run_info)
self._run_cli.set_help_intro(help_intro)
# Create initial screen output detailing the run.
self._title = "run-start: " + self._run_description
self._init_command = "run_info"
self._title_color = "blue_on_white"
def on_run_end(self, request):
"""Overrides on-run-end callback.
Actions taken:
1) Load the debug dump.
2) Bring up the Analyzer CLI.
Args:
      request: An instance of `OnRunEndRequest`.
    Returns:
      An instance of `OnRunEndResponse`.
"""
self._is_run_start = False
if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
partition_graphs = None
if request.run_metadata and request.run_metadata.partition_graphs:
partition_graphs = request.run_metadata.partition_graphs
elif request.client_graph_def:
partition_graphs = [request.client_graph_def]
if request.tf_error and not os.path.isdir(self._dump_root):
# It is possible that the dump root may not exist due to errors that
# have occurred prior to graph execution (e.g., invalid device
# assignments), in which case we will just raise the exception as the
# unwrapped Session does.
raise request.tf_error
debug_dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=partition_graphs)
debug_dump.set_python_graph(self._sess.graph)
passed_filter = None
if self._active_tensor_filter:
if not debug_dump.find(
self._tensor_filters[self._active_tensor_filter], first_n=1):
# No dumped tensor passes the filter in this run. Clean up the dump
# directory and move on.
self._remove_dump_root()
return framework.OnRunEndResponse()
else:
# Some dumped tensor(s) from this run passed the filter.
passed_filter = self._active_tensor_filter
self._active_tensor_filter = None
self._prep_debug_cli_for_run_end(
debug_dump, request.tf_error, passed_filter)
self._run_start_response = self._launch_cli()
# Clean up the dump generated by this run.
self._remove_dump_root()
elif request.performed_action == framework.OnRunStartAction.PROFILE_RUN:
self._prep_profile_cli_for_run_end(self._sess.graph, request.run_metadata)
self._run_start_response = self._launch_cli()
else:
# No debug information to show following a non-debug run() call.
self._run_start_response = None
# Return placeholder response that currently holds no additional
# information.
return framework.OnRunEndResponse()
def _remove_dump_root(self):
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _prep_debug_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
"""Prepare (but not launch) CLI for run-end, with debug dump from the run.
Args:
debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this
run.
tf_error: (None or OpError) OpError that happened during the run() call
(if any).
passed_filter: (None or str) Name of the tensor filter that just passed
and caused the preparation of this run-end CLI (if any).
"""
if tf_error:
help_intro = cli_shared.get_error_intro(tf_error)
self._init_command = "help"
self._title_color = "red_on_white"
else:
help_intro = None
self._init_command = "lt"
self._title_color = "black_on_white"
if passed_filter is not None:
# Some dumped tensor(s) from this run passed the filter.
self._init_command = "lt -f %s" % passed_filter
self._title_color = "red_on_white"
self._run_cli = analyzer_cli.create_analyzer_ui(
debug_dump, self._tensor_filters, ui_type=self._ui_type,
on_ui_exit=self._remove_dump_root)
# Get names of all dumped tensors.
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" %
(datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
dumped_tensor_names)
# Tab completion for commands "node_info", "list_inputs" and
# "list_outputs". The list comprehension is used below because nodes()
# output can be unicodes and they need to be converted to strs.
self._run_cli.register_tab_comp_context(
["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
[str(node_name) for node_name in debug_dump.nodes()])
# TODO(cais): Reduce API surface area for aliases vis-a-vis tab
# completion contexts and registered command handlers.
self._title = "run-end: " + self._run_description
if help_intro:
self._run_cli.set_help_intro(help_intro)
def _prep_profile_cli_for_run_end(self, py_graph, run_metadata):
self._init_command = "lp"
self._run_cli = profile_analyzer_cli.create_profiler_ui(
py_graph, run_metadata, ui_type=self._ui_type,
config=self._run_cli.config)
self._title = "run-end (profiler mode): " + self._run_description
def _launch_cli(self):
"""Launch the interactive command-line interface.
Returns:
The OnRunStartResponse specified by the user using the "run" command.
"""
self._register_this_run_info(self._run_cli)
response = self._run_cli.run_ui(
init_command=self._init_command,
title=self._title,
title_color=self._title_color)
return response
def _run_info_handler(self, args, screen_info=None):
output = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
output.extend(cli_shared.get_tfdbg_logo())
output.extend(self._run_info)
if (not self._is_run_start and
debugger_cli_common.MAIN_MENU_KEY in output.annotations):
menu = output.annotations[debugger_cli_common.MAIN_MENU_KEY]
if "list_tensors" not in menu.captions():
menu.insert(
0, debugger_cli_common.MenuItem("list_tensors", "list_tensors"))
return output
def _print_feed_handler(self, args, screen_info=None):
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
screen_info)
if not self._feed_dict:
return cli_shared.error(
"The feed_dict of the current run is None or empty.")
parsed = self._argparsers["print_feed"].parse_args(args)
tensor_name, tensor_slicing = (
command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))
feed_key = None
feed_value = None
for key in self._feed_dict:
key_name = common.get_graph_element_name(key)
if key_name == tensor_name:
feed_key = key_name
feed_value = self._feed_dict[key]
break
if feed_key is None:
return cli_shared.error(
"The feed_dict of the current run does not contain the key %s" %
tensor_name)
else:
return cli_shared.format_tensor(
feed_value,
feed_key + " (feed)",
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=cli_shared.parse_ranges_highlight(parsed.ranges),
include_numeric_summary=parsed.numeric_summary)
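# Editor's illustrative sketch (not part of the original file; the tensor
# name is hypothetical): the handler above accepts the slicing syntax
# understood by command_parser.parse_tensor_name_with_slicing, e.g.:
#   tfdbg> print_feed input_data:0          # print the whole fed value
#   tfdbg> print_feed input_data:0[0, :]    # print one row of the fed value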
def _run_handler(self, args, screen_info=None):
"""Command handler for "run" command during on-run-start."""
del screen_info # Currently unused.
parsed = self._argparsers["run"].parse_args(args)
parsed.node_name_filter = parsed.node_name_filter or None
parsed.op_type_filter = parsed.op_type_filter or None
parsed.tensor_dtype_filter = parsed.tensor_dtype_filter or None
if parsed.profile:
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(
framework.OnRunStartAction.PROFILE_RUN, []))
self._skip_debug = parsed.no_debug
self._run_through_times = parsed.times
if parsed.times > 1 or parsed.no_debug:
# If -t times > 1 or -n (no-debug) was requested, the very next run will
# be a non-debug run.
action = framework.OnRunStartAction.NON_DEBUG_RUN
debug_urls = []
else:
action = framework.OnRunStartAction.DEBUG_RUN
debug_urls = self._get_run_debug_urls()
run_start_response = framework.OnRunStartResponse(
action,
debug_urls,
node_name_regex_whitelist=parsed.node_name_filter,
op_type_regex_whitelist=parsed.op_type_filter,
tensor_dtype_regex_whitelist=parsed.tensor_dtype_filter)
if parsed.till_filter_pass:
# For the run-till-filter-pass (run -f) mode, use the DEBUG_RUN
# option to access the intermediate tensors, and set the corresponding
# state flag of the class itself to True.
if parsed.till_filter_pass in self._tensor_filters:
action = framework.OnRunStartAction.DEBUG_RUN
self._active_tensor_filter = parsed.till_filter_pass
self._active_tensor_filter_run_start_response = run_start_response
else:
# Handle invalid filter name.
return debugger_cli_common.RichTextLines(
["ERROR: tensor filter \"%s\" does not exist." %
parsed.till_filter_pass])
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(exit_token=run_start_response)
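# Editor's illustrative sketch (not part of the original file; flag spellings
# are assumed from the parsed attribute names above, and the filter name must
# have been registered as a tensor filter beforehand):
#   tfdbg> run                     # DEBUG_RUN: execute with tensor dumping
#   tfdbg> run -n                  # NON_DEBUG_RUN: execute without debugging
#   tfdbg> run -t 3                # run through without debugging, then debug
#   tfdbg> run -f has_inf_or_nan   # keep running until the named filter passes
#   tfdbg> run -p                  # PROFILE_RUN: profile instead of debugging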
def _register_this_run_info(self, curses_cli):
curses_cli.register_command_handler(
"run",
self._run_handler,
self._argparsers["run"].format_help(),
prefix_aliases=["r"])
curses_cli.register_command_handler(
"invoke_stepper",
self._on_run_start_step_handler,
self._argparsers["invoke_stepper"].format_help(),
prefix_aliases=["s"])
curses_cli.register_command_handler(
"run_info",
self._run_info_handler,
self._argparsers["run_info"].format_help(),
prefix_aliases=["ri"])
curses_cli.register_command_handler(
"print_feed",
self._print_feed_handler,
self._argparsers["print_feed"].format_help(),
prefix_aliases=["pf"])
if self._tensor_filters:
# Register tab completion for the filter names.
curses_cli.register_tab_comp_context(["run", "r"],
list(self._tensor_filters.keys()))
if self._feed_dict:
# Register tab completion for feed_dict keys.
feed_keys = [common.get_graph_element_name(key)
for key in self._feed_dict.keys()]
curses_cli.register_tab_comp_context(["print_feed", "pf"], feed_keys)
def _on_run_start_step_handler(self, args, screen_info=None):
"""Command handler for "invoke_stepper" command during on-run-start."""
_ = screen_info # Currently unused.
# No parsing is currently necessary for invoke_stepper. This may change
# in the future when the command has arguments.
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(
framework.OnRunStartAction.INVOKE_STEPPER, []))
def _get_run_debug_urls(self):
"""Get the debug_urls value for the current run() call.
Returns:
debug_urls: (list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
"""
return ["file://" + self._dump_root]
def _update_run_calls_state(self,
run_call_count,
fetches,
feed_dict,
is_callable_runner=False):
"""Update the internal state with regard to run() call history.
Args:
run_call_count: (int) Number of run() calls that have occurred.
fetches: a node/tensor or a list of node/tensor that are the fetches of
the run() call. This is the same as the fetches argument to the run()
call.
feed_dict: None or a dict. This is the feed_dict argument to the run()
call.
is_callable_runner: (bool) whether a runner returned by
Session.make_callable is being run.
"""
self._run_call_count = run_call_count
self._feed_dict = feed_dict
self._run_description = cli_shared.get_run_short_description(
run_call_count,
fetches,
feed_dict,
is_callable_runner=is_callable_runner)
self._run_through_times -= 1
self._run_info = cli_shared.get_run_start_intro(
run_call_count,
fetches,
feed_dict,
self._tensor_filters,
is_callable_runner=is_callable_runner)
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""Overrides method in base class to implement interactive node stepper.
Args:
node_stepper: (`stepper.NodeStepper`) The underlying NodeStepper API
object.
restore_variable_values_on_exit: (`bool`) Whether any variables whose
values have been altered during this node-stepper invocation should be
restored to their old values when this invocation ends.
Returns:
The same return values as the `Session.run()` call on the same fetches as
the NodeStepper.
"""
stepper = stepper_cli.NodeStepperCLI(node_stepper)
# On exiting the node-stepper CLI, the finalize method of the node_stepper
# object will be called, ensuring that the state of the graph will be the
# same as if the stepping did not happen.
# TODO(cais): Perhaps some users will want the effect of the interactive
# stepping and value injection to persist. When that happens, make the call
# to finalize optional.
stepper_ui = ui_factory.get_ui(
self._ui_type,
on_ui_exit=(node_stepper.restore_variable_values if
restore_variable_values_on_exit else None))
stepper_ui.register_command_handler(
"list_sorted_nodes",
stepper.list_sorted_nodes,
stepper.arg_parsers["list_sorted_nodes"].format_help(),
prefix_aliases=["lt", "lsn"])
stepper_ui.register_command_handler(
"cont",
stepper.cont,
stepper.arg_parsers["cont"].format_help(),
prefix_aliases=["ct", "c"])
stepper_ui.register_command_handler(
"step",
stepper.step,
stepper.arg_parsers["step"].format_help(),
prefix_aliases=["st", "s"])
stepper_ui.register_command_handler(
"print_tensor",
stepper.print_tensor,
stepper.arg_parsers["print_tensor"].format_help(),
prefix_aliases=["pt"])
stepper_ui.register_command_handler(
"inject_value",
stepper.inject_value,
stepper.arg_parsers["inject_value"].format_help(),
prefix_aliases=["inject", "override_value", "override"])
# Register tab completion candidates.
stepper_ui.register_tab_comp_context([
"cont", "ct", "c", "pt", "inject_value", "inject", "override_value",
"override"
], [str(elem) for elem in node_stepper.sorted_nodes()])
# TODO(cais): Tie up register_tab_comp_context to a single alias to shorten
# calls like this.
return stepper_ui.run_ui(
init_command="lt",
title="Node Stepper: " + self._run_description,
title_color="blue_on_white")
|
kholidfu/django | refs/heads/master | django/conf/locale/bn/__init__.py | 12133432 | |
etos/django | refs/heads/master | tests/csrf_tests/__init__.py | 12133432 | |
keedio/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/conf/locale/ru/__init__.py | 12133432 | |
Astonex/Antelope | refs/heads/master | antelope/antelope/settings/__init__.py | 12133432 | |
TifaSky/zenoss_midrest | refs/heads/master | app/tests/__init__.py | 12133432 | |
erudit/zenon | refs/heads/master | eruditorg/core/metrics/management/commands/__init__.py | 12133432 | |
DavidNorman/tensorflow | refs/heads/master | tensorflow/python/saved_model/simple_save_test.py | 22 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel simple save functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import simple_save
from tensorflow.python.saved_model import tag_constants
class SimpleSaveTest(test.TestCase):
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.Variable(variable_value, name=variable_name)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(variable_value, self.evaluate(v))
return v
def _check_variable_info(self, actual_variable, expected_variable):
self.assertEqual(actual_variable.name, expected_variable.name)
self.assertEqual(actual_variable.dtype, expected_variable.dtype)
self.assertEqual(len(actual_variable.shape), len(expected_variable.shape))
for i in range(len(actual_variable.shape)):
self.assertEqual(actual_variable.shape[i], expected_variable.shape[i])
def _check_tensor_info(self, actual_tensor_info, expected_tensor):
self.assertEqual(actual_tensor_info.name, expected_tensor.name)
self.assertEqual(actual_tensor_info.dtype, expected_tensor.dtype)
self.assertEqual(
len(actual_tensor_info.tensor_shape.dim), len(expected_tensor.shape))
for i in range(len(actual_tensor_info.tensor_shape.dim)):
self.assertEqual(actual_tensor_info.tensor_shape.dim[i].size,
expected_tensor.shape[i])
@test_util.run_deprecated_v1
def testSimpleSave(self):
"""Test simple_save that uses the default parameters."""
export_dir = os.path.join(test.get_temp_dir(),
"test_simple_save")
# Initialize input and output variables and save a prediction graph using
# the default parameters.
with self.session(graph=ops.Graph()) as sess:
var_x = self._init_and_validate_variable(sess, "var_x", 1)
var_y = self._init_and_validate_variable(sess, "var_y", 2)
inputs = {"x": var_x}
outputs = {"y": var_y}
simple_save.simple_save(sess, export_dir, inputs, outputs)
# Restore the graph with a valid tag and check the global variables and
# signature def map.
with self.session(graph=ops.Graph()) as sess:
graph = loader.load(sess, [tag_constants.SERVING], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Check value and metadata of the saved variables.
self.assertEqual(len(collection_vars), 2)
self.assertEqual(1, collection_vars[0].eval())
self.assertEqual(2, collection_vars[1].eval())
self._check_variable_info(collection_vars[0], var_x)
self._check_variable_info(collection_vars[1], var_y)
# Check that the appropriate signature_def_map is created with the
# default key and method name, and the specified inputs and outputs.
signature_def_map = graph.signature_def
self.assertEqual(1, len(signature_def_map))
self.assertEqual(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
list(signature_def_map.keys())[0])
signature_def = signature_def_map[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.assertEqual(signature_constants.PREDICT_METHOD_NAME,
signature_def.method_name)
self.assertEqual(1, len(signature_def.inputs))
self._check_tensor_info(signature_def.inputs["x"], var_x)
self.assertEqual(1, len(signature_def.outputs))
self._check_tensor_info(signature_def.outputs["y"], var_y)
if __name__ == "__main__":
test.main()
|
maartenq/ansible | refs/heads/devel | lib/ansible/modules/utilities/logic/import_role.py | 24 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'
}
DOCUMENTATION = '''
---
author: Ansible Core Team (@ansible)
module: import_role
short_description: Import a role into a play
description:
- Much like the `roles:` keyword, this task loads a role, but it allows you to control when the role tasks run in
between other tasks of the play.
- Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
you want the opposite behavior, use M(include_role) instead. To better understand the difference you can read
the L(Including and Importing Guide,../user_guide/playbooks_reuse_includes.html).
version_added: "2.4"
options:
name:
description:
- The name of the role to be executed.
required: True
tasks_from:
description:
- File to load from a role's C(tasks/) directory.
default: main
vars_from:
description:
- File to load from a role's C(vars/) directory.
default: main
defaults_from:
description:
- File to load from a role's C(defaults/) directory.
default: main
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
type: bool
default: 'yes'
notes:
- Handlers are made available to the whole play.
- "Since Ansible 2.7: variables defined in C(vars) and C(defaults) for the role are exposed at playbook parsing time.
Due to this, these variables will be accessible to roles and tasks executed before the location of the
C(import_role) task."
- Unlike C(include_role), variable exposure is not configurable; variables are always exposed.
'''
EXAMPLES = """
- hosts: all
tasks:
- import_role:
name: myrole
- name: Run tasks/other.yaml instead of 'main'
import_role:
name: myrole
tasks_from: other
- name: Pass variables to role
import_role:
name: myrole
vars:
rolevar1: value from task
- name: Apply condition to each task in role
import_role:
name: myrole
when: not idontwanttorun
"""
RETURN = """
# This module does not return anything except tasks to execute.
"""
|
mbayon/TFG-MachineLearning | refs/heads/master | venv/lib/python3.6/site-packages/django/views/debug.py | 18 | from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.urls import Resolver404, resolve
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.IGNORECASE)
CLEANSED_SUBSTITUTE = '********************'
class CallableSettingWrapper(object):
""" Object to wrap callable appearing in settings
* Not to call in the debug page (#21345).
* Not to break the debug page if the callable forbidding to set attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
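# Editor's illustrative sketch (values hypothetical, not part of Django):
#   cleanse_setting('SECRET_KEY', 'abc123')
#   # -> '********************'
#   cleanse_setting('DATABASES', {'PASSWORD': 'pw', 'NAME': 'db'})
#   # -> {'PASSWORD': '********************', 'NAME': 'db'}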
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
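# Editor's illustrative call site (hedged sketch; Django's real wiring lives
# in django.core.handlers and differs in detail):
#   try:
#       response = view_func(request)  # hypothetical view call
#   except Exception:
#       response = technical_500_response(request, *sys.exc_info())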
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the values of keys marked as sensitive in a MultiValueDict with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
# might raise an exception. isinstance checks that lazy
# MultiValueDicts will have a return value.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
# The force_escape filter assumes unicode; make sure that works
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
if self.request is None:
user_str = None
else:
try:
user_str = force_text(self.request.user)
except Exception:
# request.user may raise OperationalError if the database is
# unavailable, for example.
user_str = '[unable to retrieve the current user]'
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'user_str': user_str,
'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
if self.request is not None:
c['request_GET_items'] = self.request.GET.items()
c['request_FILES_items'] = self.request.FILES.items()
c['request_COOKIES_items'] = self.request.COOKIES.items()
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = force_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
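# Editor's note (illustrative): with lineno=41 (0-based) and context_lines=7,
# the method above returns (34, source[34:41], source[41], source[42:48]),
# i.e. the lower bound plus pre-context, the failing line, and post-context.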
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception (always in Python 2,
# sometimes in Python 3), take the traceback from self.tb (Python 2
# doesn't have a __traceback__ attribute on Exception)
exc_value = exceptions.pop()
tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if six.PY2:
tb = tb.tb_next
elif not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried or ( # empty URLconf
request.path == '/' and
len(tried) == 1 and # default URLconf
len(tried[0]) == 1 and
getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
)):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({
"title": _("Welcome to Django"),
"heading": _("It worked!"),
"subheading": _("Congratulations on your first Django-powered page."),
"instructions": _(
"Next, start your first app by running <code>python manage.py startapp [app_label]</code>."
),
"explanation": _(
"You're seeing this message because you have <code>DEBUG = True</code> in your "
"Django settings file and you haven't configured any URLs. Get to work!"
),
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
r"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; padding-left: 2px; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 10px 20px; }
#template-not-exist .postmortem-section { margin-bottom: 3px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
.append-bottom { margin-bottom: 10px; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.textContent = s.textContent == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.textContent = link.textContent.trim() == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.get_raw_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if postmortem %}
<p class="append-bottom">Django tried loading these templates, in this order:</p>
{% for entry in postmortem %}
<p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p>
<ul>
{% if entry.tried %}
{% for attempt in entry.tried %}
<li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li>
{% endfor %}
{% else %}
<li>This engine did not provide a list of tried templates.</li>
{% endif %}
</ul>
{% endfor %}
{% else %}
<p>No templates were found because your 'TEMPLATES' setting is not configured.</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% if template_info.bottom != template_info.total %} cut-bottom{% endif %}">
{% for source_line in template_info.source_lines %}
{% if source_line.0 == template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}"""
"""<span class="specific">{{ template_info.during }}</span>"""
"""{{ template_info.after }}</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endif %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause|force_escape }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause|force_escape }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename }}</code> in <code>{{ frame.function }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
""" """{{ frame.context_line }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{% if settings.MIDDLEWARE is not None %}{{ settings.MIDDLEWARE|pprint }}"""
"""{% else %}{{ settings.MIDDLEWARE_CLASSES|pprint }}{% endif %}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}"""
"{% for source_line in template_info.source_lines %}"
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}
Traceback:{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause|force_escape }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause|force_escape }}), another exception occurred:
{% endif %}{% endif %}{% endifchanged %}
File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public website">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
{% if user_str %}
<h3 id="user-info">USER</h3>
<p>{{ user_str }}</p>
{% endif %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_GET_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST_items %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in filtered_POST_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_FILES_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_COOKIES_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""") # NOQA
TECHNICAL_500_TEXT_TEMPLATE = (""""""
"""{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{% if settings.MIDDLEWARE is not None %}{{ settings.MIDDLEWARE|pprint }}"""
"""{% else %}{{ settings.MIDDLEWARE_CLASSES|pprint }}{% endif %}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}
{% for source_line in template_info.source_lines %}"""
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}{% if frames %}
Traceback:"""
"{% for frame in frames %}"
"{% ifchanged frame.exc_cause %}"
" {% if frame.exc_cause %}" """
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
{% if user_str %}USER: {{ user_str }}{% endif %}
GET:{% for k, v in request_GET_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request_FILES_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request_COOKIES_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:0 %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:0 %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% if not is_email %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
{% endif %}
""") # NOQA
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>
{% if request_path %}
The current path, <code>{{ request_path|escape }}</code>,{% else %}
The empty path{% endif %} didn't match any of these.
</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
|
HackerEarth/cassandra-python-driver | refs/heads/cqlengine_multiplehosts | cassandra/io/geventreactor.py | 1 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gevent
from gevent import select, socket, ssl
from gevent.event import Event
from gevent.queue import Queue
from collections import defaultdict
from functools import partial
import logging
import os
from six.moves import xrange
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL
from cassandra import OperationTimedOut
from cassandra.connection import Connection, ConnectionShutdown
from cassandra.protocol import RegisterMessage
log = logging.getLogger(__name__)
def is_timeout(err):
    # Errnos that mean "operation still in progress / try again" rather than
    # a hard failure; on Windows ('nt'/'ce') an in-progress non-blocking
    # connect can surface as EINVAL.
    return (
        err in (EINPROGRESS, EALREADY, EWOULDBLOCK) or
        (err == EINVAL and os.name in ('nt', 'ce'))
    )
class GeventConnection(Connection):
"""
An implementation of :class:`.Connection` that utilizes ``gevent``.
"""
_total_reqd_bytes = 0
_read_watcher = None
_write_watcher = None
_socket = None
    @classmethod
    def factory(cls, *args, **kwargs):
        """
        A factory function which returns connections that have succeeded in
        connecting and are ready for service (or raises an exception
        otherwise).
        """
        timeout = kwargs.pop('timeout', 5.0)
conn = cls(*args, **kwargs)
conn.connected_event.wait(timeout)
if conn.last_error:
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection")
else:
return conn
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.connected_event = Event()
self._write_queue = Queue()
self._callbacks = {}
self._push_watchers = defaultdict(set)
sockerr = None
addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
for (af, socktype, proto, canonname, sockaddr) in addresses:
try:
self._socket = socket.socket(af, socktype, proto)
if self.ssl_options:
self._socket = ssl.wrap_socket(self._socket, **self.ssl_options)
self._socket.settimeout(1.0)
self._socket.connect(sockaddr)
sockerr = None
break
except socket.error as err:
sockerr = err
if sockerr:
raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror))
if self.sockopts:
for args in self.sockopts:
self._socket.setsockopt(*args)
self._read_watcher = gevent.spawn(self.handle_read)
self._write_watcher = gevent.spawn(self.handle_write)
self._send_options_message()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s" % (id(self), self.host))
if self._read_watcher:
self._read_watcher.kill(block=False)
if self._write_watcher:
self._write_watcher.kill(block=False)
if self._socket:
self._socket.close()
log.debug("Closed socket to %s" % (self.host,))
if not self.is_defunct:
self.error_all_callbacks(
ConnectionShutdown("Connection to %s was closed" % self.host))
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_close(self):
log.debug("connection closed by server")
self.close()
def handle_write(self):
run_select = partial(select.select, (), (self._socket,), ())
while True:
try:
next_msg = self._write_queue.get()
run_select()
except Exception as exc:
if not self.is_closed:
log.debug("Exception during write select() for %s: %s", self, exc)
self.defunct(exc)
return
try:
self._socket.sendall(next_msg)
except socket.error as err:
log.debug("Exception during socket sendall for %s: %s", self, err)
self.defunct(err)
return # Leave the write loop
def handle_read(self):
run_select = partial(select.select, (self._socket,), (), ())
while True:
try:
run_select()
except Exception as exc:
if not self.is_closed:
log.debug("Exception during read select() for %s: %s", self, exc)
self.defunct(exc)
return
            try:
                while True:
                    buf = self._socket.recv(self.in_buffer_size)
                    self._iobuf.write(buf)
                    if len(buf) < self.in_buffer_size:
                        # A short read (including b'') means there is no more
                        # data buffered on the socket right now.
                        break
except socket.error as err:
if not is_timeout(err):
log.debug("Exception during socket recv for %s: %s", self, err)
self.defunct(err)
return # leave the read loop
if self._iobuf.tell():
self.process_io_buffer()
            else:
                # select() reported the socket readable but recv() returned
                # no bytes: the server has closed the connection.
                log.debug("Connection %s closed by server", self)
                self.close()
                return
    def push(self, data):
        # Enqueue the payload in out_buffer_size chunks; handle_write()
        # drains the queue one chunk per sendall(), bounding how long any
        # single message can occupy the writer greenlet.
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])
def register_watcher(self, event_type, callback, register_timeout=None):
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout)
def register_watchers(self, type_callback_dict, register_timeout=None):
for event_type, callback in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=type_callback_dict.keys()),
timeout=register_timeout)
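# Minimal usage sketch (assumes a reachable Cassandra node; ``factory`` blocks
# the calling greenlet until the connection handshake completes or the timeout
# elapses, raising on failure):
#
#     conn = GeventConnection.factory(host='127.0.0.1', port=9042, timeout=5.0)
#     try:
#         ...  # hand the connection to the driver's cluster/session machinery
#     finally:
#         conn.close()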
|
Ziqi-Li/bknqgis | refs/heads/master | pandas/pandas/plotting/_compat.py | 11 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
from distutils.version import LooseVersion
def _mpl_le_1_2_1():
try:
import matplotlib as mpl
return (str(mpl.__version__) <= LooseVersion('1.2.1') and
str(mpl.__version__)[0] != '0')
except ImportError:
return False
def _mpl_ge_1_3_1():
try:
import matplotlib
# The or v[0] == '0' is because their versioneer is
# messed up on dev
return (matplotlib.__version__ >= LooseVersion('1.3.1') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_1_4_0():
try:
import matplotlib
return (matplotlib.__version__ >= LooseVersion('1.4') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_1_5_0():
try:
import matplotlib
return (matplotlib.__version__ >= LooseVersion('1.5') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_2_0_0():
try:
import matplotlib
return matplotlib.__version__ >= LooseVersion('2.0')
except ImportError:
return False
def _mpl_le_2_0_0():
try:
import matplotlib
return matplotlib.compare_versions('2.0.0', matplotlib.__version__)
except ImportError:
return False
def _mpl_ge_2_0_1():
try:
import matplotlib
return matplotlib.__version__ >= LooseVersion('2.0.1')
except ImportError:
return False
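# Typical call-site pattern for these version gates (a sketch;
# ``draw_new_style`` and ``draw_legacy_style`` are hypothetical helpers, not
# pandas functions):
#
#     if _mpl_ge_2_0_0():
#         draw_new_style(ax)     # matplotlib 2.x default aesthetics
#     else:
#         draw_legacy_style(ax)  # matplotlib 1.x fallback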
|
JackDanger/sentry | refs/heads/master | src/sentry/south_migrations/0308_auto__add_versiondsymfile__add_unique_versiondsymfile_dsym_file_versio.py | 2 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'VersionDSymFile'
db.create_table('sentry_versiondsymfile', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('dsym_file', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.ProjectDSymFile'], null=True)),
('dsym_app', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.DSymApp'])),
('version', self.gf('django.db.models.fields.CharField')(max_length=32)),
('build', self.gf('django.db.models.fields.CharField')(max_length=32, null=True)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['VersionDSymFile'])
# Adding unique constraint on 'VersionDSymFile', fields ['dsym_file', 'version', 'build']
db.create_unique('sentry_versiondsymfile', ['dsym_file_id', 'version', 'build'])
# Adding model 'DSymApp'
db.create_table('sentry_dsymapp', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Project'])),
('app_id', self.gf('django.db.models.fields.CharField')(max_length=64)),
('sync_id', self.gf('django.db.models.fields.CharField')(max_length=64, null=True)),
('data', self.gf('jsonfield.fields.JSONField')(default={})),
('platform', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0)),
('last_synced', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['DSymApp'])
# Adding unique constraint on 'DSymApp', fields ['project', 'platform', 'app_id']
db.create_unique('sentry_dsymapp', ['project_id', 'platform', 'app_id'])
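        # Rough model equivalents of the two tables created above (a sketch
        # inferred from the column definitions in this migration; the
        # canonical definitions live in sentry.models):
        #
        #   class DSymApp(Model):
        #       project = FlexibleForeignKey('sentry.Project')
        #       app_id = models.CharField(max_length=64)   # unique with (project, platform)
        #
        #   class VersionDSymFile(Model):
        #       dsym_file = FlexibleForeignKey('sentry.ProjectDSymFile', null=True)
        #       dsym_app = FlexibleForeignKey('sentry.DSymApp')
        #       version = models.CharField(max_length=32)  # unique with (dsym_file, build)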
def backwards(self, orm):
# Removing unique constraint on 'DSymApp', fields ['project', 'platform', 'app_id']
db.delete_unique('sentry_dsymapp', ['project_id', 'platform', 'app_id'])
# Removing unique constraint on 'VersionDSymFile', fields ['dsym_file', 'version', 'build']
db.delete_unique('sentry_versiondsymfile', ['dsym_file_id', 'version', 'build'])
# Deleting model 'VersionDSymFile'
db.delete_table('sentry_versiondsymfile')
# Deleting model 'DSymApp'
db.delete_table('sentry_dsymapp')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'6949fb4996184c5abf84d6e2206deb2028847544b25745dcb59f9788f860cd4b'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'120099c387964b1c826925621112bec1d8dbb441c0f24233b2487fa1350c03d7'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Transitive Clemente'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'c52f5a5dc7dd41448dc017bd251144dc'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 3, 23, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 4, 22, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'4f556f8dc184403fa0cd07e85a1a388204ca73f817294a0a9652081c86929bca'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'da4ff55914c148c1b652ef734ac727adc0fc7dc596a2445199009467c7d51337'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 3, 30, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'),)", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'MJgpqztt1JFwT0a7mqP7bJys66oGvuWt'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry'] |
DirtyUnicorns/android_external_chromium_org | refs/heads/lollipop | chrome/common/extensions/docs/server2/github_file_system_test.py | 97 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from appengine_wrappers import files
from fake_fetchers import ConfigureFakeFetchers
from github_file_system import GithubFileSystem
from object_store_creator import ObjectStoreCreator
from test_util import Server2Path
class GithubFileSystemTest(unittest.TestCase):
def setUp(self):
ConfigureFakeFetchers()
self._base_path = Server2Path('test_data', 'github_file_system')
self._file_system = GithubFileSystem.CreateChromeAppsSamples(
ObjectStoreCreator.ForTest())
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def testList(self):
self.assertEqual(json.loads(self._ReadLocalFile('expected_list.json')),
self._file_system.Read(['']).Get())
def testRead(self):
self.assertEqual(self._ReadLocalFile('expected_read.txt'),
self._file_system.ReadSingle('analytics/launch.js').Get())
def testStat(self):
self.assertEqual(0, self._file_system.Stat('zipball').version)
def testKeyGeneration(self):
self.assertEqual(0, len(files.GetBlobKeys()))
self._file_system.ReadSingle('analytics/launch.js').Get()
self.assertEqual(1, len(files.GetBlobKeys()))
self._file_system.ReadSingle('analytics/main.css').Get()
self.assertEqual(1, len(files.GetBlobKeys()))
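    # The second read touches a different file, yet the blob-key count stays
    # at 1: a plausible reading is that both reads are served from one cached
    # zipball fetch (an interpretation of the fake-fetcher setup, not a
    # documented guarantee).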
if __name__ == '__main__':
unittest.main()
|
silverbp/master-builder | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from setuptools import find_packages
from setuptools import setup
install_requires = [
'GitPython >= 2.0.8, < 2.1',
'MarkupSafe >= 0.23, < 1.0',
'PyYAML >= 3.11, < 4.0',
'decorator >= 4.0.10, < 5.0',
'docker-py >= 1.9.0, < 2.0',
'jsonpath-rw >= 1.4.0, < 2.0',
'pluggy >= 0.3.1, < 1.0',
'ply >= 3.8, < 4.0',
'py >= 1.4.31, < 2.0',
'requests >= 2.11.0, < 3.0',
'six >= 1.10.0, < 2.0',
'websocket-client >= 0.37.0, < 1.0'
]
tests_require = [
'pytest',
]
setup(
name='Master-Builder Build System',
version='1.0.0',
    description='A Universal/Pluggable Build System',
url='https://github.com/silverbp/master-builder',
author='Silver Blueprints LLC',
license='Apache License 2.0',
packages=find_packages(exclude=['tests.*', 'tests']),
include_package_data=True,
test_suite='nose.collector',
install_requires=install_requires,
tests_require=tests_require,
entry_points="""
[console_scripts]
mb=mb.cli.main:main
""",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
)
|
humbletim/three.js | refs/heads/master | utils/exporters/blender/addons/io_three/exporter/_json.py | 296 | import json
from .. import constants
ROUND = constants.DEFAULT_PRECISION
## THREE override function
def _json_floatstr(o):
if ROUND is not None:
o = round(o, ROUND)
return '%g' % o
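# A quick illustration of the rounding above (assuming
# constants.DEFAULT_PRECISION is 6; that value is an assumption, not something
# asserted by this module):
#   _json_floatstr(0.123456789) -> '0.123457'
#   _json_floatstr(2.0)         -> '2'   ('%g' drops redundant digits)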
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
ValueError=ValueError,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
str=str,
tuple=tuple,
):
'''
Overwrite json.encoder for Python 2.7 and above to not
    assign each index of a list or tuple to its own row, as
this is completely asinine behaviour
'''
## @THREE
# Override the function
_floatstr = _json_floatstr
if _indent is not None and not isinstance(_indent, str):
_indent = ' ' * _indent
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
## @THREEJS
# - block the moronic functionality that puts each
# index on its own line causing insane row counts
#if _indent is not None:
# _current_indent_level += 1
# newline_indent = '\n' + _indent * _current_indent_level
# separator = _item_separator + newline_indent
# buf += newline_indent
#else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, str):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, int):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + _indent * _current_indent_level
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + _indent * _current_indent_level
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = sorted(dct.items(), key=lambda kv: kv[0])
else:
items = dct.items()
for key, value in items:
if isinstance(key, str):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, int):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, str):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, int):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + _indent * _current_indent_level
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, str):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, int):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
# override the encoder
json.encoder._make_iterencode = _make_iterencode
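# With the override installed, indented dumps keep whole arrays on one row.
# Minimal sketch of the intended effect (illustrative, assuming the default
# separators and a rounding precision that leaves these floats unchanged):
#   import json
#   json.dumps({'vertices': [0.1, 0.2, 0.3]}, indent=4)
#   -> '{\n    "vertices": [0.1,0.2,0.3]\n}'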
|
tskisner/pytoast | refs/heads/master | src/python/tests/map_satellite.py | 1 | # Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
from ..mpi import MPI
from .mpi import MPITestCase
import sys
import os
import shutil
import numpy as np
import numpy.testing as nt
import healpy as hp
from ..tod.tod import *
from ..tod.pointing import *
from ..tod.sim_tod import *
from ..tod.sim_det_noise import *
from ..tod.sim_det_map import *
from ..tod.sim_noise import *
from ..map import *
from ._helpers import (create_outdir, create_distdata, boresight_focalplane,
uniform_chunks)
class MapSatelliteTest(MPITestCase):
def setUp(self):
fixture_name = os.path.splitext(os.path.basename(__file__))[0]
self.outdir = create_outdir(self.comm, fixture_name)
# Create one observation per group, and each observation will have
# one detector with multiple chunks distributed among the processes.
self.data = create_distdata(self.comm, obs_per_group=1)
self.ndet = 1
self.rate = 40.0
# Create detectors with white noise
self.NET = 7.0
dnames, dquat, depsilon, drate, dnet, dfmin, dfknee, dalpha = \
boresight_focalplane(self.ndet, samplerate=self.rate, net=self.NET,
fknee=0.0)
# Samples per observation
self.totsamp = 4000000
# Pixelization
self.sim_nside = 64
self.map_nside = 64
# Scan strategy
self.spinperiod = 10.0
self.spinangle = 30.0
self.precperiod = 50.0
self.precangle = 65.0
# in order to make sure that the noise realization is reproducible
# at all concurrencies, we set the chunksize to something independent
# of the number of ranks.
# Chunks
chunks = uniform_chunks(self.totsamp, nchunk=100)
# Populate the single observation per group
tod = TODSatellite(
self.data.comm.comm_group,
dquat,
self.totsamp,
detranks=1,
firsttime=0.0,
rate=self.rate,
spinperiod=self.spinperiod,
spinangle=self.spinangle,
precperiod=self.precperiod,
precangle=self.precangle,
sampsizes=chunks)
precquat = np.empty(4 * tod.local_samples[1],
dtype=np.float64).reshape((-1, 4))
slew_precession_axis(precquat, firstsamp=tod.local_samples[0],
samplerate=self.rate, degday=1.0)
tod.set_prec_axis(qprec=precquat)
# add analytic noise model with white noise
nse = AnalyticNoise(
rate=drate,
fmin=dfmin,
detectors=dnames,
fknee=dfknee,
alpha=dalpha,
NET=dnet
)
self.data.obs[0]["tod"] = tod
self.data.obs[0]["noise"] = nse
def test_boresight_null(self):
# verify that if all angles are zero, we get fixed pointing
# along the x-axis.
nsim = 1000
zaxis = np.array([0,0,1], dtype=np.float64)
borequat = np.empty(4*nsim, dtype=np.float64).reshape((-1, 4))
satellite_scanning(borequat, qprec=None, samplerate=100.0,
spinperiod=1.0, spinangle=0.0, precperiod=20.0,
precangle=0.0)
data = qa.rotate(borequat, np.tile(zaxis, nsim).reshape(-1,3))
np.testing.assert_almost_equal(data, np.tile(np.array([1.0, 0.0, 0.0]), nsim).reshape(-1,3))
return
def test_grad(self):
# add simple sky gradient signal
grad = OpSimGradient(nside=self.sim_nside, nest=True)
grad.exec(self.data)
# make a simple pointing matrix
pointing = OpPointingHpix(nside=self.map_nside, nest=True)
pointing.exec(self.data)
handle = None
if self.comm.rank == 0:
handle = open(os.path.join(self.outdir,"out_test_satellite_grad_info"), "w")
self.data.info(handle)
if self.comm.rank == 0:
handle.close()
# make a binned map with madam
madam_out = os.path.join(self.outdir, "madam_grad")
if self.comm.rank == 0:
if os.path.isdir(madam_out):
shutil.rmtree(madam_out)
os.mkdir(madam_out)
pars = {}
pars[ 'kfirst' ] = 'F'
pars[ 'base_first' ] = 1.0
pars[ 'fsample' ] = self.rate
pars[ 'nside_map' ] = self.map_nside
pars[ 'nside_cross' ] = self.map_nside
pars[ 'nside_submap' ] = 8
pars[ 'write_map' ] = 'F'
pars[ 'write_binmap' ] = 'T'
pars[ 'write_matrix' ] = 'F'
pars[ 'write_wcov' ] = 'F'
pars[ 'write_hits' ] = 'T'
pars[ 'kfilter' ] = 'F'
pars[ 'path_output' ] = madam_out
pars[ 'info' ] = 0
madam = OpMadam(params=pars, name='grad', purge=True)
if madam.available:
madam.exec(self.data)
if self.comm.rank == 0:
import matplotlib.pyplot as plt
hitsfile = os.path.join(madam_out, 'madam_hmap.fits')
hits = hp.read_map(hitsfile, nest=True)
outfile = "{}.png".format(hitsfile)
hp.mollview(hits, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
binfile = os.path.join(madam_out, 'madam_bmap.fits')
bins = hp.read_map(binfile, nest=True)
outfile = "{}.png".format(binfile)
hp.mollview(bins, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
# compare binned map to input signal
tothits = np.sum(hits)
nt.assert_equal(self.data.comm.ngroups * self.totsamp, tothits)
sig = grad.sigmap()
mask = (bins > -1.0e20)
nt.assert_almost_equal(bins[mask], sig[mask], decimal=4)
else:
print("libmadam not available, skipping tests")
return
def test_noise(self):
# generate noise timestreams from the noise model
nsig = OpSimNoise()
nsig.exec(self.data)
# make a simple pointing matrix
pointing = OpPointingHpix(nside=self.map_nside, nest=True)
pointing.exec(self.data)
handle = None
if self.comm.rank == 0:
handle = open(os.path.join(self.outdir,"out_test_satellite_noise_info"), "w")
self.data.info(handle)
if self.comm.rank == 0:
handle.close()
# For noise weighting in madam, we know we are using an analytic noise
# and so we can use noise weights based on the NET. This is instrument
# specific.
tod = self.data.obs[0]['tod']
nse = self.data.obs[0]['noise']
detweights = {}
for d in tod.local_dets:
detweights[d] = 1.0 / (self.rate * nse.NET(d)**2)
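        # Worked example of the inverse-variance weight above, using the
        # values from setUp(): NET = 7.0 and rate = 40.0 Hz give a white-noise
        # variance per sample of rate * NET**2 = 1960.0, so each detector
        # weight is 1.0 / 1960.0 ~= 5.1e-4.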
# make a binned map with madam
madam_out = os.path.join(self.outdir, "madam_noise")
if self.comm.rank == 0:
if os.path.isdir(madam_out):
shutil.rmtree(madam_out)
os.mkdir(madam_out)
pars = {}
pars[ 'kfirst' ] = 'F'
pars[ 'base_first' ] = 1.0
pars[ 'fsample' ] = self.rate
pars[ 'nside_map' ] = self.map_nside
pars[ 'nside_cross' ] = self.map_nside
pars[ 'nside_submap' ] = 8
pars[ 'write_map' ] = 'F'
pars[ 'write_binmap' ] = 'T'
pars[ 'write_matrix' ] = 'F'
pars[ 'write_wcov' ] = 'F'
pars[ 'write_hits' ] = 'T'
pars[ 'kfilter' ] = 'F'
pars[ 'path_output' ] = madam_out
pars[ 'info' ] = 0
madam = OpMadam(params=pars, detweights=detweights, name='noise')
if madam.available:
madam.exec(self.data)
if self.comm.rank == 0:
import matplotlib.pyplot as plt
hitsfile = os.path.join(madam_out, 'madam_hmap.fits')
hits = hp.read_map(hitsfile, nest=True)
outfile = "{}.png".format(hitsfile)
hp.mollview(hits, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
binfile = os.path.join(madam_out, 'madam_bmap.fits')
bins = hp.read_map(binfile, nest=True)
outfile = "{}.png".format(binfile)
hp.mollview(bins, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
# check that pixel rms makes sense given the
# number of hits and the timestream rms
tothits = np.sum(hits)
nt.assert_equal(self.data.comm.ngroups * self.totsamp, tothits)
#print("tothits = ", tothits)
mask = (bins > -1.0e20)
#print("num good pix = ", len(mask))
rthits = np.sqrt(hits[mask].astype(np.float64))
#print("rthits = ", rthits)
#print("bmap = ", bins[mask])
weighted = bins[mask] * rthits
pixrms = np.std(weighted)
todrms = self.NET * np.sqrt(self.rate)
relerr = np.absolute(pixrms - todrms) / todrms
#print("pixrms = ", pixrms)
#print("todrms = ", todrms)
#print("relerr = ", relerr)
self.assertTrue(relerr < 0.05)
else:
print("libmadam not available, skipping tests")
return
def test_scanmap(self):
# make a simple pointing matrix
pointing = OpPointingHpix(nside=self.map_nside, nest=True)
pointing.exec(self.data)
# get locally hit pixels
lc = OpLocalPixels()
localpix = lc.exec(self.data)
# construct a sky gradient operator, just to get the signal
        # map; we are not going to use the operator on the data.
grad = OpSimGradient(nside=self.sim_nside, nest=True)
sig = grad.sigmap()
# pick a submap size and find the local submaps.
submapsize = np.floor_divide(self.sim_nside, 16)
localsm = np.unique(np.floor_divide(localpix, submapsize))
# construct a distributed map which has the gradient
npix = 12 * self.sim_nside * self.sim_nside
distsig = DistPixels(comm=self.data.comm.comm_group, size=npix, nnz=1,
dtype=np.float64, submap=submapsize, local=localsm)
lsub, lpix = distsig.global_to_local(localpix)
distsig.data[lsub,lpix,:] = \
np.array([ sig[x] for x in localpix ]).reshape(-1, 1)
# create TOD from map
scansim = OpSimScan(distmap=distsig)
scansim.exec(self.data)
handle = None
if self.comm.rank == 0:
handle = open(os.path.join(self.outdir,
"out_test_satellite_scanmap_info"), "w")
self.data.info(handle)
if self.comm.rank == 0:
handle.close()
# make a binned map with madam
madam_out = os.path.join(self.outdir, "madam_scansim")
if self.comm.rank == 0:
if os.path.isdir(madam_out):
shutil.rmtree(madam_out)
os.mkdir(madam_out)
pars = {}
pars[ 'kfirst' ] = 'F'
pars[ 'base_first' ] = 1.0
pars[ 'fsample' ] = self.rate
pars[ 'nside_map' ] = self.map_nside
pars[ 'nside_cross' ] = self.map_nside
pars[ 'nside_submap' ] = 8
pars[ 'write_map' ] = 'F'
pars[ 'write_binmap' ] = 'T'
pars[ 'write_matrix' ] = 'F'
pars[ 'write_wcov' ] = 'F'
pars[ 'write_hits' ] = 'T'
pars[ 'kfilter' ] = 'F'
pars[ 'path_output' ] = madam_out
pars[ 'info' ] = 0
madam = OpMadam(params=pars, name='scan')
if madam.available:
madam.exec(self.data)
if self.comm.rank == 0:
import matplotlib.pyplot as plt
hitsfile = os.path.join(madam_out, 'madam_hmap.fits')
hits = hp.read_map(hitsfile, nest=True)
outfile = "{}.png".format(hitsfile)
hp.mollview(hits, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
binfile = os.path.join(madam_out, 'madam_bmap.fits')
bins = hp.read_map(binfile, nest=True)
outfile = "{}.png".format(binfile)
hp.mollview(bins, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
# compare binned map to input signal
tothits = np.sum(hits)
nt.assert_equal(self.data.comm.ngroups * self.totsamp, tothits)
mask = (bins > -1.0e20)
nt.assert_almost_equal(bins[mask], sig[mask], decimal=4)
else:
print("libmadam not available, skipping tests")
return
def test_hwpfast(self):
# make a pointing matrix with a HWP that rotates 2*PI every sample
hwprate = self.rate * 60.0
pointing = OpPointingHpix(nside=self.map_nside, nest=True,
hwprpm=hwprate)
pointing.exec(self.data)
# get locally hit pixels
lc = OpLocalPixels()
localpix = lc.exec(self.data)
# construct a sky gradient operator, just to get the signal
        # map; we are not going to use the operator on the data.
grad = OpSimGradient(nside=self.sim_nside, nest=True)
sig = grad.sigmap()
# pick a submap size and find the local submaps.
submapsize = np.floor_divide(self.sim_nside, 16)
localsm = np.unique(np.floor_divide(localpix, submapsize))
# construct a distributed map which has the gradient
npix = 12 * self.sim_nside * self.sim_nside
distsig = DistPixels(comm=self.data.comm.comm_group, size=npix, nnz=1,
dtype=np.float64, submap=submapsize, local=localsm)
lsub, lpix = distsig.global_to_local(localpix)
distsig.data[lsub,lpix,:] = \
np.array([ sig[x] for x in localpix ]).reshape(-1, 1)
# create TOD from map
scansim = OpSimScan(distmap=distsig)
scansim.exec(self.data)
handle = None
if self.comm.rank == 0:
handle = open(os.path.join(self.outdir,
"out_test_satellite_hwpfast_info"), "w")
self.data.info(handle)
if self.comm.rank == 0:
handle.close()
# make a binned map with madam
madam_out = os.path.join(self.outdir, "madam_hwpfast")
if self.comm.rank == 0:
if os.path.isdir(madam_out):
shutil.rmtree(madam_out)
os.mkdir(madam_out)
pars = {}
pars[ 'kfirst' ] = 'F'
pars[ 'base_first' ] = 1.0
pars[ 'fsample' ] = self.rate
pars[ 'nside_map' ] = self.map_nside
pars[ 'nside_cross' ] = self.map_nside
pars[ 'nside_submap' ] = 8
pars[ 'write_map' ] = 'F'
pars[ 'write_binmap' ] = 'T'
pars[ 'write_matrix' ] = 'F'
pars[ 'write_wcov' ] = 'F'
pars[ 'write_hits' ] = 'T'
pars[ 'kfilter' ] = 'F'
pars[ 'path_output' ] = madam_out
pars[ 'info' ] = 0
madam = OpMadam(params=pars, name='scan')
if madam.available:
madam.exec(self.data)
if self.comm.rank == 0:
import matplotlib.pyplot as plt
hitsfile = os.path.join(madam_out, 'madam_hmap.fits')
hits = hp.read_map(hitsfile, nest=True)
outfile = "{}.png".format(hitsfile)
hp.mollview(hits, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
binfile = os.path.join(madam_out, 'madam_bmap.fits')
bins = hp.read_map(binfile, nest=True)
outfile = "{}.png".format(binfile)
hp.mollview(bins, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
# compare binned map to input signal
tothits = np.sum(hits)
nt.assert_equal(self.data.comm.ngroups * self.totsamp, tothits)
mask = (bins > -1.0e20)
nt.assert_almost_equal(bins[mask], sig[mask], decimal=4)
else:
print("libmadam not available, skipping tests")
return
def test_hwpconst(self):
# make a pointing matrix with a HWP that is constant
hwpstep = 2.0 * np.pi
hwpsteptime = (self.totsamp / self.rate) / 60.0
pointing = OpPointingHpix(nside=self.map_nside, nest=True,
hwpstep=hwpstep, hwpsteptime=hwpsteptime)
pointing.exec(self.data)
# get locally hit pixels
lc = OpLocalPixels()
localpix = lc.exec(self.data)
# construct a sky gradient operator, just to get the signal
        # map; we are not going to use the operator on the data.
grad = OpSimGradient(nside=self.sim_nside, nest=True)
sig = grad.sigmap()
# pick a submap size and find the local submaps.
submapsize = np.floor_divide(self.sim_nside, 16)
localsm = np.unique(np.floor_divide(localpix, submapsize))
# construct a distributed map which has the gradient
npix = 12 * self.sim_nside * self.sim_nside
distsig = DistPixels(comm=self.data.comm.comm_group, size=npix, nnz=1,
dtype=np.float64, submap=submapsize, local=localsm)
lsub, lpix = distsig.global_to_local(localpix)
distsig.data[lsub,lpix,:] = \
np.array([ sig[x] for x in localpix ]).reshape(-1, 1)
# create TOD from map
scansim = OpSimScan(distmap=distsig)
scansim.exec(self.data)
handle = None
if self.comm.rank == 0:
handle = open(os.path.join(self.outdir,
"out_test_satellite_hwpconst_info"), "w")
self.data.info(handle)
if self.comm.rank == 0:
handle.close()
# make a binned map with madam
madam_out = os.path.join(self.outdir, "madam_hwpconst")
if self.comm.rank == 0:
if os.path.isdir(madam_out):
shutil.rmtree(madam_out)
os.mkdir(madam_out)
pars = {}
pars[ 'kfirst' ] = 'F'
pars[ 'base_first' ] = 1.0
pars[ 'fsample' ] = self.rate
pars[ 'nside_map' ] = self.map_nside
pars[ 'nside_cross' ] = self.map_nside
pars[ 'nside_submap' ] = 8
pars[ 'write_map' ] = 'F'
pars[ 'write_binmap' ] = 'T'
pars[ 'write_matrix' ] = 'F'
pars[ 'write_wcov' ] = 'F'
pars[ 'write_hits' ] = 'T'
pars[ 'kfilter' ] = 'F'
pars[ 'path_output' ] = madam_out
pars[ 'info' ] = 0
madam = OpMadam(params=pars, name='scan')
if madam.available:
madam.exec(self.data)
if self.comm.rank == 0:
import matplotlib.pyplot as plt
hitsfile = os.path.join(madam_out, 'madam_hmap.fits')
hits = hp.read_map(hitsfile, nest=True)
outfile = "{}.png".format(hitsfile)
hp.mollview(hits, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
binfile = os.path.join(madam_out, 'madam_bmap.fits')
bins = hp.read_map(binfile, nest=True)
outfile = "{}.png".format(binfile)
hp.mollview(bins, xsize=1600, nest=True)
plt.savefig(outfile)
plt.close()
# compare binned map to input signal
tothits = np.sum(hits)
nt.assert_equal(self.data.comm.ngroups * self.totsamp, tothits)
mask = (bins > -1.0e20)
nt.assert_almost_equal(bins[mask], sig[mask], decimal=4)
else:
print("libmadam not available, skipping tests")
return
|
UK992/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/tests/conftest.py | 40 | pytest_plugins = "tests.support.fixtures"
|
RedhawkSDR/integration-gnuhawk | refs/heads/master | components/vector_to_streams_cc_4o/tests/test_vector_to_streams_cc_4o.py | 1 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in vector_to_streams_cc_4o"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../vector_to_streams_cc_4o.spd.xml") # By default tests all implementations
|
tkurnosova/selenium | refs/heads/master | py/test/selenium/webdriver/chrome/__init__.py | 2454 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
Francis-Liu/animated-broccoli | refs/heads/master | nova/db/sqlalchemy/models.py | 13 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import (Column, Index, Integer, BigInteger, Enum, String,
schema, Unicode)
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from nova.db.sqlalchemy import types
CONF = cfg.CONF
BASE = declarative_base()
def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql')
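# Reading of the helper above: with_variant() makes the column render as
# MEDIUMTEXT on MySQL, while every other backend falls back to plain TEXT.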
class NovaBase(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase):
metadata = None
def __copy__(self):
"""Implement a safe copy.copy().
SQLAlchemy-mapped objects travel with an object
called an InstanceState, which is pegged to that object
specifically and tracks everything about that object. It's
critical within all attribute operations, including gets
and deferred loading. This object definitely cannot be
shared among two instances, and must be handled.
The copy routine here makes use of session.merge() which
already essentially implements a "copy" style of operation,
which produces a new instance with a new InstanceState and copies
all the data along mapped attributes without using any SQL.
The mode we are using here has the caveat that the given object
must be "clean", e.g. that it has no database-loaded state
that has been updated and not flushed. This is a good thing,
as creating a copy of an object including non-flushed, pending
database state is probably not a good idea; neither represents
what the actual row looks like, and only one should be flushed.
"""
session = orm.Session()
copy = session.merge(self, load=False)
session.expunge(copy)
return copy
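    # Minimal usage sketch (hypothetical object name): copy.copy() routes
    # through __copy__ above and yields a detached twin with its own
    # InstanceState, emitting no SQL:
    #   import copy
    #   twin = copy.copy(some_service_row)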
def save(self, session=None):
from nova.db.sqlalchemy import api
if session is None:
session = api.get_session()
super(NovaBase, self).save(session=session)
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
__tablename__ = 'services'
__table_args__ = (
schema.UniqueConstraint("host", "topic", "deleted",
name="uniq_services0host0topic0deleted"),
schema.UniqueConstraint("host", "binary", "deleted",
name="uniq_services0host0binary0deleted")
)
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
disabled_reason = Column(String(255))
last_seen_up = Column(DateTime, nullable=True)
forced_down = Column(Boolean, default=False)
version = Column(Integer, default=0)
class ComputeNode(BASE, NovaBase):
"""Represents a running compute service on a host."""
__tablename__ = 'compute_nodes'
__table_args__ = (
schema.UniqueConstraint(
'host', 'hypervisor_hostname', 'deleted',
name="uniq_compute_nodes0host0hypervisor_hostname0deleted"),
)
id = Column(Integer, primary_key=True)
service_id = Column(Integer, nullable=True)
    # FIXME(sbauza): Host field is nullable because some old Juno compute nodes
# can still report stats from an old ResourceTracker without setting this
# field.
# This field has to be set non-nullable in a later cycle (probably Lxxx)
# once we are sure that all compute nodes in production report it.
host = Column(String(255), nullable=True)
vcpus = Column(Integer, nullable=False)
memory_mb = Column(Integer, nullable=False)
local_gb = Column(Integer, nullable=False)
vcpus_used = Column(Integer, nullable=False)
memory_mb_used = Column(Integer, nullable=False)
local_gb_used = Column(Integer, nullable=False)
hypervisor_type = Column(MediumText(), nullable=False)
hypervisor_version = Column(Integer, nullable=False)
hypervisor_hostname = Column(String(255))
# Free Ram, amount of activity (resize, migration, boot, etc) and
# the number of running VM's are a good starting point for what's
# important when making scheduling decisions.
free_ram_mb = Column(Integer)
free_disk_gb = Column(Integer)
current_workload = Column(Integer)
running_vms = Column(Integer)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64",
# "model":"Nehalem",
# "topology":{"sockets":1, "threads":2, "cores":3},
# "features":["tdtscp", "xtpr"]}'
#
# Points are "json translatable" and it must have all dictionary keys
# above, since it is copied from <cpu> tag of getCapabilities()
# (See libvirt.virtConnection).
cpu_info = Column(MediumText(), nullable=False)
disk_available_least = Column(Integer)
host_ip = Column(types.IPAddress())
supported_instances = Column(Text)
metrics = Column(Text)
# Note(yongli): json string PCI Stats
# '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
pci_stats = Column(Text)
# extra_resources is a json string containing arbitrary
# data about additional resources.
extra_resources = Column(Text)
# json-encode string containing compute node statistics
stats = Column(Text, default='{}')
# json-encoded dict that contains NUMA topology as generated by
    # objects.NUMATopology._to_json()
numa_topology = Column(Text)
# allocation ratios provided by the RT
ram_allocation_ratio = Column(Float, nullable=True)
cpu_allocation_ratio = Column(Float, nullable=True)
class Certificate(BASE, NovaBase):
"""Represents a x509 certificate."""
__tablename__ = 'certificates'
__table_args__ = (
Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'),
Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
)
id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
file_name = Column(String(255))
class Instance(BASE, NovaBase):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
Index('uuid', 'uuid', unique=True),
Index('instances_project_id_deleted_idx',
'project_id', 'deleted'),
Index('instances_reservation_id_idx',
'reservation_id'),
Index('instances_terminated_at_launched_at_idx',
'terminated_at', 'launched_at'),
Index('instances_uuid_deleted_idx',
'uuid', 'deleted'),
Index('instances_task_state_updated_at_idx',
'task_state', 'updated_at'),
Index('instances_host_node_deleted_idx',
'host', 'node', 'deleted'),
Index('instances_host_deleted_cleaned_idx',
'host', 'deleted', 'cleaned'),
schema.UniqueConstraint('uuid', name='uniq_instances0uuid'),
)
injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for column in iter(orm.object_mapper(self).columns):
key = column.name
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
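        # For example (assuming nova's stock default template): with
        # instance_name_template = 'instance-%08x' and id = 10 this returns
        # 'instance-0000000a'; a mapping template such as 'uuid-%(uuid)s'
        # is instead handled by the TypeError branch above.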
@property
def _extra_keys(self):
return ['name']
user_id = Column(String(255))
project_id = Column(String(255))
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
hostname = Column(String(255))
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(MediumText())
power_state = Column(Integer)
vm_state = Column(String(255))
task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
ephemeral_key_uuid = Column(String(36))
# This is not related to hostname, above. It refers
# to the nova node.
host = Column(String(255)) # , ForeignKey('hosts.id'))
# To identify the "ComputeNode" which the instance resides in.
# This equals to ComputeNode.hypervisor_hostname.
node = Column(String(255))
# *not* flavorid, this is the internal primary_key
instance_type_id = Column(Integer)
user_data = Column(MediumText())
reservation_id = Column(String(255))
# NOTE(sbiswas7): 'scheduled_at' is still in the database
# and can be removed in the future release.
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
# This always refers to the availability_zone kwarg passed in /servers and
# provided as an API option, not at all related to the host AZ the instance
# belongs to.
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
# To remember on which host an instance booted.
# An instance may have moved to another host by live migration.
launched_on = Column(MediumText())
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
locked = Column(Boolean)
locked_by = Column(Enum('owner', 'admin'))
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36), nullable=False)
root_device_name = Column(String(255))
default_ephemeral_device = Column(String(255))
default_swap_device = Column(String(255))
config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
access_ip_v4 = Column(types.IPAddress())
access_ip_v6 = Column(types.IPAddress())
auto_disk_config = Column(Boolean())
progress = Column(Integer)
# EC2 instance_initiated_shutdown_terminate
# True: -> 'terminate'
# False: -> 'stop'
# Note(maoy): currently Nova will always stop instead of terminate
# no matter what the flag says. So we set the default to False.
shutdown_terminate = Column(Boolean(), default=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False)
# OpenStack compute cell name. This will only be set at the top of
# the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
cell_name = Column(String(255))
internal_id = Column(Integer)
# Records whether an instance has been deleted from disk
cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase):
"""Represents a cache of information about an instance
"""
__tablename__ = 'instance_info_caches'
__table_args__ = (
schema.UniqueConstraint(
"instance_uuid",
name="uniq_instance_info_caches0instance_uuid"),)
id = Column(Integer, primary_key=True, autoincrement=True)
    # Text column used for storing a JSON object of network data for the API.
network_info = Column(MediumText())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
instance = orm.relationship(Instance,
backref=orm.backref('info_cache', uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceExtra(BASE, NovaBase):
__tablename__ = 'instance_extra'
__table_args__ = (
Index('instance_extra_idx', 'instance_uuid'),)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
numa_topology = orm.deferred(Column(Text))
pci_requests = orm.deferred(Column(Text))
flavor = orm.deferred(Column(Text))
vcpu_model = orm.deferred(Column(Text))
migration_context = orm.deferred(Column(Text))
instance = orm.relationship(Instance,
backref=orm.backref('extra',
uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceTypes(BASE, NovaBase):
"""Represents possible flavors for instances.
Note: instance_type and flavor are synonyms and the term instance_type is
deprecated and in the process of being removed.
"""
__tablename__ = "instance_types"
__table_args__ = (
schema.UniqueConstraint("flavorid", "deleted",
name="uniq_instance_types0flavorid0deleted"),
schema.UniqueConstraint("name", "deleted",
name="uniq_instance_types0name0deleted")
)
# Internal only primary key/id
id = Column(Integer, primary_key=True)
name = Column(String(255))
memory_mb = Column(Integer, nullable=False)
vcpus = Column(Integer, nullable=False)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
# Public facing id will be renamed public_id
flavorid = Column(String(255))
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, default=1)
vcpu_weight = Column(Integer)
disabled = Column(Boolean, default=False)
is_public = Column(Boolean, default=True)
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
__table_args__ = (
schema.UniqueConstraint("project_id", "resource", "deleted",
name="uniq_quotas0project_id0resource0deleted"
),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class ProjectUserQuota(BASE, NovaBase):
"""Represents a single quota override for a user with in a project."""
__tablename__ = 'project_user_quotas'
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
__table_args__ = (
schema.UniqueConstraint("user_id", "project_id", "resource", "deleted",
name=uniq_name),
Index('project_user_quotas_project_id_deleted_idx',
'project_id', 'deleted'),
Index('project_user_quotas_user_id_deleted_idx',
'user_id', 'deleted')
)
id = Column(Integer, primary_key=True, nullable=False)
project_id = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class QuotaClass(BASE, NovaBase):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
__table_args__ = (
Index('ix_quota_classes_class_name', 'class_name'),
)
id = Column(Integer, primary_key=True)
class_name = Column(String(255))
resource = Column(String(255))
hard_limit = Column(Integer)
class QuotaUsage(BASE, NovaBase):
"""Represents the current usage for a given resource."""
__tablename__ = 'quota_usages'
__table_args__ = (
Index('ix_quota_usages_project_id', 'project_id'),
Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255), nullable=False)
in_use = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
@property
def total(self):
return self.in_use + self.reserved
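    # For example, in_use == 8 with reserved == 2 gives total == 10;
    # reserved tracks reservations not yet committed or rolled back.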
until_refresh = Column(Integer)
class Reservation(BASE, NovaBase):
"""Represents a resource reservation for quotas."""
__tablename__ = 'reservations'
__table_args__ = (
Index('ix_reservations_project_id', 'project_id'),
Index('reservations_uuid_idx', 'uuid'),
Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255))
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
usage = orm.relationship(
"QuotaUsage",
foreign_keys=usage_id,
primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
'QuotaUsage.deleted == 0)')
class Snapshot(BASE, NovaBase):
"""Represents a block storage device that can be attached to a VM."""
__tablename__ = 'snapshots'
__table_args__ = ()
id = Column(String(36), primary_key=True, nullable=False)
deleted = Column(String(36), default="")
@property
def name(self):
return CONF.snapshot_name_template % self.id
@property
def volume_name(self):
return CONF.volume_name_template % self.volume_id
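    # For example, with nova's usual default templates ('snapshot-%s'
    # and 'volume-%s', an assumption here), these render 'snapshot-<id>'
    # and 'volume-<volume_id>'.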
user_id = Column(String(255))
project_id = Column(String(255))
volume_id = Column(String(36), nullable=False)
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
scheduled_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
"""Represents block device mapping that is defined by EC2."""
__tablename__ = "block_device_mapping"
__table_args__ = (
Index('snapshot_id', 'snapshot_id'),
Index('volume_id', 'volume_id'),
Index('block_device_mapping_instance_uuid_device_name_idx',
'instance_uuid', 'device_name'),
Index('block_device_mapping_instance_uuid_volume_id_idx',
'instance_uuid', 'volume_id'),
Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = orm.relationship(Instance,
backref=orm.backref('block_device_mapping'),
foreign_keys=instance_uuid,
primaryjoin='and_(BlockDeviceMapping.'
'instance_uuid=='
'Instance.uuid,'
'BlockDeviceMapping.deleted=='
'0)')
source_type = Column(String(255))
destination_type = Column(String(255))
guest_format = Column(String(255))
device_type = Column(String(255))
disk_bus = Column(String(255))
boot_index = Column(Integer)
device_name = Column(String(255))
    # default=False for compatibility with existing code. With the
    # EC2 API, the default is True for a device specified by the AMI
    # and False otherwise.
    # TODO(sshturm) add default in db
delete_on_termination = Column(Boolean, default=False)
snapshot_id = Column(String(36))
volume_id = Column(String(36))
volume_size = Column(Integer)
image_id = Column(String(36))
# for no device to suppress devices.
no_device = Column(Boolean)
connection_info = Column(MediumText())
class SecurityGroupInstanceAssociation(BASE, NovaBase):
__tablename__ = 'security_group_instance_association'
__table_args__ = (
Index('security_group_instance_association_instance_uuid_idx',
'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
security_group_id = Column(Integer, ForeignKey('security_groups.id'))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase):
"""Represents a security group."""
__tablename__ = 'security_groups'
__table_args__ = (
schema.UniqueConstraint('project_id', 'name', 'deleted',
name='uniq_security_groups0project_id0'
'name0deleted'),
)
id = Column(Integer, primary_key=True)
name = Column(String(255))
description = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
instances = orm.relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == 0,'
'SecurityGroup.deleted == 0)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
        # NOTE(anthony): the condition below shouldn't be necessary now
        # that the association is being marked as deleted. However,
        # removing it may cause existing deployments to choke, so it is
        # left in place.
'Instance.deleted == 0)',
backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True)
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
parent_group = orm.relationship("SecurityGroup", backref="rules",
foreign_keys=parent_group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
protocol = Column(String(255))
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
    # Note: This is not the parent SecurityGroup. It's the SecurityGroup
    # we're granting access to.
group_id = Column(Integer, ForeignKey('security_groups.id'))
grantee_group = orm.relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
class SecurityGroupIngressDefaultRule(BASE, NovaBase):
__tablename__ = 'security_group_default_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp" or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class ProviderFirewallRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'provider_fw_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh / WinRM."""
__tablename__ = 'key_pairs'
__table_args__ = (
schema.UniqueConstraint("user_id", "name", "deleted",
name="uniq_key_pairs0user_id0name0deleted"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False)
user_id = Column(String(255))
fingerprint = Column(String(255))
public_key = Column(MediumText())
type = Column(Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default='ssh')
class Migration(BASE, NovaBase):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
__table_args__ = (
Index('migrations_instance_uuid_and_status_idx', 'deleted',
'instance_uuid', 'status'),
    # MySQL has a limit of 3072 bytes for a multi-column index. This
# index ends up being larger than that using the utf-8 encoding.
# Limiting the index to the prefixes will keep it under the limit.
# FIXME(johannes): Is it MySQL or InnoDB that imposes the limit?
Index('migrations_by_host_nodes_and_status_idx', 'deleted',
'source_compute', 'dest_compute', 'source_node', 'dest_node',
'status', mysql_length={'source_compute': 100,
'dest_compute': 100,
'source_node': 100,
'dest_node': 100}),
)
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
# nodes are equivalent to a compute node's 'hypervisor_hostname'
source_node = Column(String(255))
dest_node = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
# TODO(_cerberus_): enum
status = Column(String(255))
migration_type = Column(Enum('migration', 'resize', 'live-migration',
'evacuation'),
nullable=True)
hidden = Column(Boolean, default=False)
instance = orm.relationship("Instance", foreign_keys=instance_uuid,
primaryjoin='and_(Migration.instance_uuid == '
'Instance.uuid, Instance.deleted == '
'0)')
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
__table_args__ = (
schema.UniqueConstraint("vlan", "deleted",
name="uniq_networks0vlan0deleted"),
Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
Index('networks_host_idx', 'host'),
Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
Index('networks_uuid_project_id_deleted_idx', 'uuid',
'project_id', 'deleted'),
Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
Index('networks_cidr_v6_idx', 'cidr_v6')
)
id = Column(Integer, primary_key=True, nullable=False)
label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(types.CIDR())
cidr_v6 = Column(types.CIDR())
multi_host = Column(Boolean, default=False)
gateway_v6 = Column(types.IPAddress())
netmask_v6 = Column(types.IPAddress())
netmask = Column(types.IPAddress())
bridge = Column(String(255))
bridge_interface = Column(String(255))
gateway = Column(types.IPAddress())
broadcast = Column(types.IPAddress())
dns1 = Column(types.IPAddress())
dns2 = Column(types.IPAddress())
vlan = Column(Integer)
vpn_public_address = Column(types.IPAddress())
vpn_public_port = Column(Integer)
vpn_private_address = Column(types.IPAddress())
dhcp_start = Column(types.IPAddress())
rxtx_base = Column(Integer)
project_id = Column(String(255))
priority = Column(Integer)
host = Column(String(255)) # , ForeignKey('hosts.id'))
uuid = Column(String(36))
mtu = Column(Integer)
dhcp_server = Column(types.IPAddress())
enable_dhcp = Column(Boolean, default=True)
share_address = Column(Boolean, default=False)
class VirtualInterface(BASE, NovaBase):
"""Represents a virtual interface on an instance."""
__tablename__ = 'virtual_interfaces'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_virtual_interfaces0address0deleted"),
Index('virtual_interfaces_network_id_idx', 'network_id'),
Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
Index('virtual_interfaces_uuid_idx', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
address = Column(String(255))
network_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
__table_args__ = (
schema.UniqueConstraint(
"address", "deleted", name="uniq_fixed_ips0address0deleted"),
Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'),
Index('network_id', 'network_id'),
Index('address', 'address'),
Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
Index('fixed_ips_host_idx', 'host'),
Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
'deleted'),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
'address', 'reserved', 'network_id', 'deleted'),
Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
'allocated'),
Index('fixed_ips_deleted_allocated_updated_at_idx', 'deleted',
'allocated', 'updated_at')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
network_id = Column(Integer)
virtual_interface_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # associated means that a fixed_ip has its instance_uuid column set
# allocated means that a fixed_ip has its virtual_interface_id column set
# TODO(sshturm) add default in db
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
# TODO(sshturm) add default in db
leased = Column(Boolean, default=False)
# TODO(sshturm) add default in db
reserved = Column(Boolean, default=False)
host = Column(String(255))
network = orm.relationship(Network,
backref=orm.backref('fixed_ips'),
foreign_keys=network_id,
primaryjoin='and_('
'FixedIp.network_id == Network.id,'
'FixedIp.deleted == 0,'
'Network.deleted == 0)')
instance = orm.relationship(Instance,
foreign_keys=instance_uuid,
primaryjoin='and_('
'FixedIp.instance_uuid == Instance.uuid,'
'FixedIp.deleted == 0,'
'Instance.deleted == 0)')
virtual_interface = orm.relationship(VirtualInterface,
backref=orm.backref('fixed_ips'),
foreign_keys=virtual_interface_id,
primaryjoin='and_('
'FixedIp.virtual_interface_id == '
'VirtualInterface.id,'
'FixedIp.deleted == 0,'
'VirtualInterface.deleted == 0)')
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_floating_ips0address0deleted"),
Index('fixed_ip_id', 'fixed_ip_id'),
Index('floating_ips_host_idx', 'host'),
Index('floating_ips_project_id_idx', 'project_id'),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
'pool', 'deleted', 'fixed_ip_id', 'project_id')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
fixed_ip_id = Column(Integer)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False)
# TODO(sshturm) add default in db
pool = Column(String(255))
interface = Column(String(255))
fixed_ip = orm.relationship(FixedIp,
backref=orm.backref('floating_ips'),
foreign_keys=fixed_ip_id,
primaryjoin='and_('
'FloatingIp.fixed_ip_id == FixedIp.id,'
'FloatingIp.deleted == 0,'
'FixedIp.deleted == 0)')
class DNSDomain(BASE, NovaBase):
"""Represents a DNS domain with availability zone or project info."""
__tablename__ = 'dns_domains'
__table_args__ = (
Index('dns_domains_project_id_idx', 'project_id'),
Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
)
deleted = Column(Boolean, default=False)
domain = Column(String(255), primary_key=True)
scope = Column(String(255))
availability_zone = Column(String(255))
project_id = Column(String(255))
class ConsolePool(BASE, NovaBase):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
__table_args__ = (
schema.UniqueConstraint(
"host", "console_type", "compute_host", "deleted",
name="uniq_console_pools0host0console_type0compute_host0deleted"),
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
username = Column(String(255))
password = Column(String(255))
console_type = Column(String(255))
public_hostname = Column(String(255))
host = Column(String(255))
compute_host = Column(String(255))
class Console(BASE, NovaBase):
"""Represents a console session for an instance."""
__tablename__ = 'consoles'
__table_args__ = (
Index('consoles_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
instance_name = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
password = Column(String(255))
port = Column(Integer)
pool_id = Column(Integer, ForeignKey('console_pools.id'))
pool = orm.relationship(ConsolePool, backref=orm.backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
"""Represents a user-provided metadata key/value pair for an instance."""
__tablename__ = 'instance_metadata'
__table_args__ = (
Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = orm.relationship(Instance, backref="metadata",
foreign_keys=instance_uuid,
primaryjoin='and_('
'InstanceMetadata.instance_uuid == '
'Instance.uuid,'
'InstanceMetadata.deleted == 0)')
class InstanceSystemMetadata(BASE, NovaBase):
"""Represents a system-owned metadata key/value pair for an instance."""
__tablename__ = 'instance_system_metadata'
__table_args__ = (
Index('instance_uuid', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
instance = orm.relationship(Instance, backref="system_metadata",
foreign_keys=instance_uuid)
class InstanceTypeProjects(BASE, NovaBase):
"""Represent projects associated instance_types."""
__tablename__ = "instance_type_projects"
__table_args__ = (schema.UniqueConstraint(
"instance_type_id", "project_id", "deleted",
name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
),
)
id = Column(Integer, primary_key=True)
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
project_id = Column(String(255))
instance_type = orm.relationship(InstanceTypes, backref="projects",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
'InstanceTypeProjects.deleted == 0)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
__table_args__ = (
Index('instance_type_extra_specs_instance_type_id_key_idx',
'instance_type_id', 'key'),
schema.UniqueConstraint(
"instance_type_id", "key", "deleted",
name=("uniq_instance_type_extra_specs0"
"instance_type_id0key0deleted")
),
{'mysql_collate': 'utf8_bin'},
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = orm.relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == 0)')
class Cell(BASE, NovaBase):
"""Represents parent and child cells of this cell. Cells can
have multiple parents and children, so there could be any number
of entries with is_parent=True or False
"""
__tablename__ = 'cells'
__table_args__ = (schema.UniqueConstraint(
"name", "deleted", name="uniq_cells0name0deleted"
),
)
id = Column(Integer, primary_key=True)
# Name here is the 'short name' of a cell. For instance: 'child1'
name = Column(String(255))
api_url = Column(String(255))
transport_url = Column(String(255), nullable=False)
weight_offset = Column(Float(), default=0.0)
weight_scale = Column(Float(), default=1.0)
is_parent = Column(Boolean())
class AggregateHost(BASE, NovaBase):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
__table_args__ = (schema.UniqueConstraint(
"host", "aggregate_id", "deleted",
name="uniq_aggregate_hosts0host0aggregate_id0deleted"
),
)
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255))
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
__table_args__ = (
schema.UniqueConstraint("aggregate_id", "key", "deleted",
name="uniq_aggregate_metadata0aggregate_id0key0deleted"
),
Index('aggregate_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
__table_args__ = ()
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
_hosts = orm.relationship(AggregateHost,
primaryjoin='and_('
'Aggregate.id == AggregateHost.aggregate_id,'
'AggregateHost.deleted == 0,'
'Aggregate.deleted == 0)')
_metadata = orm.relationship(AggregateMetadata,
primaryjoin='and_('
'Aggregate.id == AggregateMetadata.aggregate_id,'
'AggregateMetadata.deleted == 0,'
'Aggregate.deleted == 0)')
@property
def _extra_keys(self):
return ['hosts', 'metadetails', 'availability_zone']
@property
def hosts(self):
return [h.host for h in self._hosts]
@property
def metadetails(self):
return {m.key: m.value for m in self._metadata}
@property
def availability_zone(self):
if 'availability_zone' not in self.metadetails:
return None
return self.metadetails['availability_zone']
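    # For example, an aggregate with a metadata row ('availability_zone',
    # 'az1') reports availability_zone == 'az1'; without that key the
    # property returns None.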
class AgentBuild(BASE, NovaBase):
"""Represents an agent build."""
__tablename__ = 'agent_builds'
__table_args__ = (
Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
'architecture'),
schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted",
name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
)
id = Column(Integer, primary_key=True)
hypervisor = Column(String(255))
os = Column(String(255))
architecture = Column(String(255))
version = Column(String(255))
url = Column(String(255))
md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase):
"""Cache for instance bandwidth usage data pulled from the hypervisor."""
__tablename__ = 'bw_usage_cache'
__table_args__ = (
Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
'start_period'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36))
mac = Column(String(255))
start_period = Column(DateTime, nullable=False)
last_refreshed = Column(DateTime)
bw_in = Column(BigInteger)
bw_out = Column(BigInteger)
last_ctr_in = Column(BigInteger)
last_ctr_out = Column(BigInteger)
class VolumeUsage(BASE, NovaBase):
"""Cache for volume usage data pulled from the hypervisor."""
__tablename__ = 'volume_usage_cache'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), nullable=False)
instance_uuid = Column(String(36))
project_id = Column(String(36))
user_id = Column(String(64))
availability_zone = Column(String(255))
tot_last_refreshed = Column(DateTime)
tot_reads = Column(BigInteger, default=0)
tot_read_bytes = Column(BigInteger, default=0)
tot_writes = Column(BigInteger, default=0)
tot_write_bytes = Column(BigInteger, default=0)
curr_last_refreshed = Column(DateTime)
curr_reads = Column(BigInteger, default=0)
curr_read_bytes = Column(BigInteger, default=0)
curr_writes = Column(BigInteger, default=0)
curr_write_bytes = Column(BigInteger, default=0)
class S3Image(BASE, NovaBase):
"""Compatibility layer for the S3 image service talking to Glance."""
__tablename__ = 's3_images'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 volume service."""
__tablename__ = 'volume_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 snapshot service."""
__tablename__ = 'snapshot_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class InstanceFault(BASE, NovaBase):
__tablename__ = 'instance_faults'
__table_args__ = (
Index('instance_faults_host_idx', 'host'),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
'instance_uuid', 'deleted', 'created_at')
)
id = Column(Integer, primary_key=True, nullable=False)
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
code = Column(Integer(), nullable=False)
message = Column(String(255))
details = Column(MediumText())
host = Column(String(255))
class InstanceAction(BASE, NovaBase):
"""Track client actions on an instance.
The intention is that there will only be one of these per user request. A
lookup by (instance_uuid, request_id) should always return a single result.
"""
__tablename__ = 'instance_actions'
__table_args__ = (
Index('instance_uuid_idx', 'instance_uuid'),
Index('request_id_idx', 'request_id')
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
action = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
request_id = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
message = Column(String(255))
class InstanceActionEvent(BASE, NovaBase):
"""Track events that occur during an InstanceAction."""
__tablename__ = 'instance_actions_events'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
event = Column(String(255))
action_id = Column(Integer, ForeignKey('instance_actions.id'))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
result = Column(String(255))
traceback = Column(Text)
host = Column(String(255))
details = Column(Text)
class InstanceIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 instance service."""
__tablename__ = 'instance_id_mappings'
__table_args__ = (
Index('ix_instance_id_mappings_uuid', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase):
"""Audit log for background periodic tasks."""
__tablename__ = 'task_log'
__table_args__ = (
schema.UniqueConstraint(
'task_name', 'host', 'period_beginning', 'period_ending',
name="uniq_task_log0task_name0host0period_beginning0period_ending"
),
Index('ix_task_log_period_beginning', 'period_beginning'),
Index('ix_task_log_host', 'host'),
Index('ix_task_log_period_ending', 'period_ending'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
task_name = Column(String(255), nullable=False)
state = Column(String(255), nullable=False)
host = Column(String(255), nullable=False)
period_beginning = Column(DateTime, default=timeutils.utcnow,
nullable=False)
period_ending = Column(DateTime, default=timeutils.utcnow,
nullable=False)
message = Column(String(255), nullable=False)
task_items = Column(Integer(), default=0)
errors = Column(Integer(), default=0)
class InstanceGroupMember(BASE, NovaBase):
"""Represents the members for an instance group."""
__tablename__ = 'instance_group_member'
__table_args__ = (
Index('instance_group_member_instance_idx', 'instance_id'),
)
id = Column(Integer, primary_key=True, nullable=False)
instance_id = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroupPolicy(BASE, NovaBase):
"""Represents the policy type for an instance group."""
__tablename__ = 'instance_group_policy'
__table_args__ = (
Index('instance_group_policy_policy_idx', 'policy'),
)
id = Column(Integer, primary_key=True, nullable=False)
policy = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroup(BASE, NovaBase):
"""Represents an instance group.
A group will maintain a collection of instances and the relationship
between them.
"""
__tablename__ = 'instance_groups'
__table_args__ = (
schema.UniqueConstraint("uuid", "deleted",
name="uniq_instance_groups0uuid0deleted"),
)
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(String(255))
project_id = Column(String(255))
uuid = Column(String(36), nullable=False)
name = Column(String(255))
_policies = orm.relationship(InstanceGroupPolicy, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupPolicy.group_id,'
'InstanceGroupPolicy.deleted == 0,'
'InstanceGroup.deleted == 0)')
_members = orm.relationship(InstanceGroupMember, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupMember.group_id,'
'InstanceGroupMember.deleted == 0,'
'InstanceGroup.deleted == 0)')
@property
def policies(self):
return [p.policy for p in self._policies]
@property
def members(self):
return [m.instance_id for m in self._members]
class PciDevice(BASE, NovaBase):
"""Represents a PCI host device that can be passed through to instances.
"""
__tablename__ = 'pci_devices'
__table_args__ = (
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
schema.UniqueConstraint(
"compute_node_id", "address", "deleted",
name="uniq_pci_devices0compute_node_id0address0deleted")
)
id = Column(Integer, primary_key=True)
compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
nullable=False)
# physical address of device domain:bus:slot.func (0000:09:01.1)
address = Column(String(12), nullable=False)
vendor_id = Column(String(4), nullable=False)
product_id = Column(String(4), nullable=False)
dev_type = Column(String(8), nullable=False)
dev_id = Column(String(255))
    # label is an abstract device name, used to unify devices that have
    # the same functionality but different addresses or hosts.
label = Column(String(255), nullable=False)
status = Column(String(36), nullable=False)
# the request_id is used to identify a device that is allocated for a
# particular request
request_id = Column(String(36), nullable=True)
extra_info = Column(Text)
instance_uuid = Column(String(36))
numa_node = Column(Integer, nullable=True)
instance = orm.relationship(Instance, backref="pci_devices",
foreign_keys=instance_uuid,
primaryjoin='and_('
'PciDevice.instance_uuid == Instance.uuid,'
'PciDevice.deleted == 0)')
class Tag(BASE, models.ModelBase):
"""Represents the tag for a resource."""
__tablename__ = "tags"
__table_args__ = (
Index('tags_tag_idx', 'tag'),
)
resource_id = Column(String(36), primary_key=True, nullable=False)
tag = Column(Unicode(80), primary_key=True, nullable=False)
instance = orm.relationship(
"Instance",
backref='tags',
primaryjoin='and_(Tag.resource_id == Instance.uuid,'
'Instance.deleted == 0)',
foreign_keys=resource_id
)
|
cicku/fedmsg | refs/heads/develop | fedmsg.d/ssl.py | 5 | # This file is part of fedmsg.
# Copyright (C) 2012 - 2014 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
import os
import socket
SEP = os.path.sep
here = os.getcwd()
config = dict(
sign_messages=False,
validate_signatures=True,
# Use these implementations to sign and validate messages
crypto_backend='x509',
crypto_validate_backends=['x509'],
ssldir="/etc/pki/fedmsg",
crl_location="https://fedoraproject.org/fedmsg/crl.pem",
crl_cache="/var/run/fedmsg/crl.pem",
crl_cache_expiry=10,
ca_cert_location="https://fedoraproject.org/fedmsg/ca.crt",
ca_cert_cache="/var/run/fedmsg/ca.crt",
ca_cert_cache_expiry=0, # Never expires
certnames={
# In prod/stg, map hostname to the name of the cert in ssldir.
# Unfortunately, we can't use socket.getfqdn()
#"app01.stg": "app01.stg.phx2.fedoraproject.org",
},
# A mapping of fully qualified topics to a list of cert names for which
# a valid signature is to be considered authorized. Messages on topics not
# listed here are considered automatically authorized.
routing_policy={
# Only allow announcements from production if they're signed by a
# certain certificate.
"org.fedoraproject.prod.announce.announcement": [
"announce-lockbox.phx2.fedoraproject.org",
],
},
# Set this to True if you want messages to be dropped that aren't
# explicitly whitelisted in the routing_policy.
# When this is False, only messages that have a topic in the routing_policy
# but whose cert names aren't in the associated list are dropped; messages
# whose topics do not appear in the routing_policy are not dropped.
routing_nitpicky=False,
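    # For example: with the routing_policy above, an announcement signed
    # by any cert other than announce-lockbox.phx2.fedoraproject.org is
    # dropped, while messages on unlisted topics pass through; setting
    # routing_nitpicky=True would drop those unlisted-topic messages too.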
)
|
potatolondon/django-nonrel-1-4 | refs/heads/master | django/conf/global_settings.py | 29 | # Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy',
},
}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
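# For example:
# ABSOLUTE_URL_OVERRIDES = {
#     'auth.user': lambda o: '/users/%s/' % o.username,
# }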
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported when SEND_BROKEN_LINK_EMAILS is True. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon\.ico$'),
# re.compile(r'^/robots\.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which streamed uploads will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
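# For example: FILE_UPLOAD_PERMISSIONS = 0644 (a Python 2 octal literal)
# makes uploaded files world-readable.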
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order.
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order.
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order.
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be grouped together when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (https://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
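# For example (enable only behind a proxy that strips the client-supplied
# header):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')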
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = True # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# New format
CACHES = {
}
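# For example (commented out; the local-memory backend ships with Django):
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#     }
# }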
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in
# CommentDetailsForm.clean_comment. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
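# A hypothetical project override; a replacement view must accept
# (request, reason="") like the default does:
# CSRF_FAILURE_VIEW = 'myproject.views.csrf_failure'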
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# The default logging configuration. This sends an email to
# the site admins on every HTTP 500 error. All other log
# records are sent to the bit bucket.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
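# Minimal sketch of extending this config from a project settings module;
# 'console' is a new handler name, the other names are defined above:
# LOGGING['handlers']['console'] = {'level': 'DEBUG',
#                                   'class': 'logging.StreamHandler'}
# LOGGING['loggers']['django.request']['handlers'].append('console')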
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
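# Illustrative value (the path is hypothetical):
# STATICFILES_DIRS = ('/var/www/site/static',)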
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
|
CenterForOpenScience/osf.io | refs/heads/develop | osf/migrations/0013_auto_20170412_0957.py | 28 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-12 14:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0012_auto_20170411_1548'),
]
operations = [
migrations.AlterField(
model_name='basefilenode',
name='name',
field=models.TextField(blank=True, default=''),
preserve_default=False,
),
]
|
dmsul/econtools | refs/heads/master | econtools/metrics/tests/data/src_atsls.py | 1 | import pandas as pd
import numpy as np
# Minimal attribute bag: stores expected regression results as object attributes.
class regout(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
stat_names = ['coeff', 'se', 't', 'p>t', 'CI_low', 'CI_high']
var_names = ['mpg', 'length']
atsls_std = regout(
summary=pd.DataFrame(np.array([
[-49467.76193509437,
1937433.402520039,
-.0255326257257417,
.9797711358109091,
-3978764.822885125,
3879829.299014937,
],
[-8280.069027964491,
327594.5105698549,
-.0252753595094167,
.9799749166015559,
-672672.5306688137,
656112.3926128848,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[3753648189200.375,
634678341446.1863,
],
[634678341446.1863,
107318163355.5028,
],
]),
columns=var_names,
index=var_names),
N=51,
r2=np.nan,
r2_a=np.nan,
mss=np.nan,
tss=np.nan,
rss=973193578332.4143,
kappa=np.nan,
F=.0010619545509911,
pF=.9989386404150321,
)
|
40323144/2015cdb_g7 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/zipfile.py | 620 | """
Read and write ZIP files.
XXX references to utf-8 need further investigation.
"""
import io
import os
import re
import imp
import sys
import time
import stat
import shutil
import struct
import binascii
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile, the zipfile requires ZIP64 extensions
and those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
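# e.g. struct.calcsize("<4s4H2LH") == 22: a 4-byte magic, four unsigned shorts,
# two unsigned longs, and a trailing unsigned short for the comment length.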
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except IOError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except IOError:
pass
return result
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec = list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self, zip64=None):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError("Corrupt extra field %s"%(ln,))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
old = self.header_offset
self.header_offset = counts[idx]
idx += 1
extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
def __call__(self, c):
"""Decrypt a single character."""
assert isinstance(c, int)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
class LZMACompressor:
def __init__(self):
self._comp = None
def _init(self):
props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
])
return struct.pack('<BBH', 9, 4, len(props)) + props
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.compress(data)
return self._comp.compress(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class LZMADecompressor:
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 4:
return b''
psize, = struct.unpack('<H', self._unconsumed[2:4])
if len(self._unconsumed) <= 4 + psize:
return b''
self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1,
self._unconsumed[4:4 + psize])
])
data = self._unconsumed[4 + psize:]
del self._unconsumed
result = self._decomp.decompress(data)
self.eof = self._decomp.eof
return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
elif compression == ZIP_BZIP2:
if not bz2:
raise RuntimeError(
"Compression requires the (missing) bz2 module")
elif compression == ZIP_LZMA:
if not lzma:
raise RuntimeError(
"Compression requires the (missing) lzma module")
else:
raise RuntimeError("That compression method is not supported")
def _get_compressor(compress_type):
if compress_type == ZIP_DEFLATED:
return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Compressor()
elif compress_type == ZIP_LZMA:
return LZMACompressor()
else:
return None
def _get_decompressor(compress_type):
if compress_type == ZIP_STORED:
return None
elif compress_type == ZIP_DEFLATED:
return zlib.decompressobj(-15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Decompressor()
elif compress_type == ZIP_LZMA:
return LZMADecompressor()
else:
descr = compressor_names.get(compress_type)
if descr:
raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
else:
raise NotImplementedError("compression type %d" % (compress_type,))
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = (1 << 31) - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'') & 0xffffffff
else:
self._expected_crc = None
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
if self._eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
## Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if self._decrypter is not None:
data = bytes(map(self._decrypter, data))
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = io.open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = io.open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
try:
if key == 'r':
self._RealGetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
raise RuntimeError('Mode must be "r", "w" or "a"')
except:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
if self.debug:
print('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = io.open(self.filename, 'rb')
try:
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %s is encrypted, password "
"required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = zef_file.read(12)
h = list(map(zd, header[0:12]))
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd,
close_fileobj=not self._filePassed)
except:
if not self._filePassed:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
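# e.g. _sanitize_windows_name('bad:name?.txt.', '\\') -> 'bad_name_.txt'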
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print("Duplicate name:", zinfo.filename)
if self.mode not in ("w", "a"):
raise RuntimeError('write() requires mode "w" or "a"')
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile(
"Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
return
cmpr = _get_compressor(zinfo.compress_type)
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError('File size has increased during compression')
if compress_size > ZIP64_LIMIT:
raise RuntimeError('Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset, 0)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0o600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
finally:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
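# Round-trip usage sketch for ZipFile (the file name is illustrative, and
# ZIP_DEFLATED assumes zlib is available):
# with ZipFile('demo.zip', 'w', compression=ZIP_DEFLATED) as zf:
#     zf.writestr('hello.txt', 'hello world')
# with ZipFile('demo.zip') as zf:
#     assert zf.read('hello.txt') == b'hello world'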
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=False, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename=""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
pycache_pyc = imp.cache_from_source(file_py, True)
pycache_pyo = imp.cache_from_source(file_py, False)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyo) and
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyo file.
arcname = fname = file_pyo
elif (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_pyc) and
os.stat(pycache_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_pyc
arcname = file_pyc
elif (os.path.isfile(pycache_pyo) and
os.stat(pycache_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyo file, but write it to the legacy pyo
# file name in the archive.
fname = pycache_pyo
arcname = file_pyo
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
fname = (pycache_pyc if __debug__ else pycache_pyo)
arcname = (file_pyc if __debug__ else file_pyo)
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_pyc
arcname = file_pyc
else:
fname = pycache_pyo
arcname = file_pyo
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
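# Usage sketch for PyZipFile (the package path is hypothetical):
# with PyZipFile('lib.zip', 'w') as pzf:
#     pzf.writepy('mypackage') # adds mypackage/*.pyc (or *.pyo) recursively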
def main(args=None):
import textwrap
USAGE = textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print(USAGE)
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
zf.printdir()
elif args[0] == '-t':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args[0] == '-e':
if len(args) != 3:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
elif args[0] == '-c':
if len(args) < 3:
print(USAGE)
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
with ZipFile(args[1], 'w', allowZip64=True) as zf:
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
if __name__ == "__main__":
main()
|
nitin-cherian/Webapps | refs/heads/master | SimpleIsBetterThanComplex.com/myproject/.env/lib/python3.5/site-packages/setuptools/windows_support.py | 1015 | import platform
import ctypes
def windows_only(func):
if platform.system() != 'Windows':
return lambda *args, **kwargs: None
return func
@windows_only
def hide_file(path):
"""
Set the hidden attribute on a file or directory.
From http://stackoverflow.com/questions/19622133/
`path` must be text.
"""
__import__('ctypes.wintypes')
SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
SetFileAttributes.restype = ctypes.wintypes.BOOL
FILE_ATTRIBUTE_HIDDEN = 0x02
ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
if not ret:
raise ctypes.WinError()
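# Usage sketch (Windows only; elsewhere the decorator turns this into a no-op
# returning None). The path below is illustrative:
# hide_file('C:\\path\\to\\secret.txt')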
|
google/latexify_py | refs/heads/develop | tests/node_visitor_base_test.py | 1 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for node_visitor_base."""
import pytest
from latexify import node_visitor_base
class MockVisitor(node_visitor_base.NodeVisitorBase):
"""Mock visitor class."""
def __init__(self):
# Dummy member to fail visitor invocation.
self.visit_Baz = None # pylint: disable=invalid-name
def generic_visit(self, node, action):
return 'generic_visit: {}, {}'.format(node.__class__.__name__, action)
def visit_Foo(self, node, action): # pylint: disable=invalid-name
del node
return 'visit_Foo: {}'.format(action)
def visit_Foo_abc(self, node): # pylint: disable=invalid-name
del node
return 'visit_Foo_abc'
def visit_Foo_xyz(self, node): # pylint: disable=invalid-name
del node
return 'visit_Foo_xyz'
class Foo:
pass
class Bar:
pass
class Baz:
pass
def test_generic_visit():
visitor = MockVisitor()
assert visitor.visit(Bar()) == 'generic_visit: Bar, None'
assert visitor.visit(Bar(), 'unknown') == 'generic_visit: Bar, unknown'
assert visitor.visit(Bar(), '123') == 'generic_visit: Bar, 123'
def test_visit_node():
visitor = MockVisitor()
assert visitor.visit(Foo()) == 'visit_Foo: None'
assert visitor.visit(Foo(), 'unknown') == 'visit_Foo: unknown'
assert visitor.visit(Foo(), '123') == 'visit_Foo: 123'
def test_visit_node_action():
visitor = MockVisitor()
assert visitor.visit(Foo(), 'abc') == 'visit_Foo_abc'
assert visitor.visit(Foo(), 'xyz') == 'visit_Foo_xyz'
def test_invalid_visit():
visitor = MockVisitor()
with pytest.raises(AttributeError, match='visit_Baz is not callable'):
visitor.visit(Baz())
|
mluo613/osf.io | refs/heads/develop | website/addons/box/tests/test_client.py | 36 | # -*- coding: utf-8 -*-
from nose.tools import * # noqa (PEP8 asserts)
from tests.base import OsfTestCase
from tests.factories import UserFactory
from website.addons.box.model import BoxUserSettings
class TestCore(OsfTestCase):
def setUp(self):
super(TestCore, self).setUp()
self.user = UserFactory()
self.user.add_addon('box')
self.user.save()
self.settings = self.user.get_addon('box')
self.settings.save()
def test_get_addon_returns_box_user_settings(self):
result = self.user.get_addon('box')
assert_true(isinstance(result, BoxUserSettings))
|
MrLoick/python-for-android | refs/heads/master | python-modules/twisted/twisted/mail/topfiles/setup.py | 54 | # Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
try:
from twisted.python import dist
except ImportError:
raise SystemExit("twisted.python.dist module not found. Make sure you "
"have installed the Twisted core package before "
"attempting to install any other Twisted projects.")
if __name__ == '__main__':
if sys.version_info[:2] >= (2, 4):
extraMeta = dict(
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: Communications :: Email :: Post-Office :: IMAP",
"Topic :: Communications :: Email :: Post-Office :: POP3",
"Topic :: Software Development :: Libraries :: Python Modules",
])
else:
extraMeta = {}
dist.setup(
twisted_subproject="mail",
scripts=dist.getScripts("mail"),
# metadata
name="Twisted Mail",
description="A Twisted Mail library, server and client.",
author="Twisted Matrix Laboratories",
author_email="twisted-python@twistedmatrix.com",
maintainer="Jp Calderone",
url="http://twistedmatrix.com/trac/wiki/TwistedMail",
license="MIT",
long_description="""\
An SMTP, IMAP and POP protocol implementation together with clients
and servers.
Twisted Mail contains high-level, efficient protocol implementations
for both clients and servers of SMTP, POP3, and IMAP4. Additionally,
it contains an "out of the box" combination SMTP/POP3 virtual-hosting
mail server. Also included is a read/write Maildir implementation and
a basic Mail Exchange calculator.
""",
**extraMeta)
|
skycucumber/Messaging-Gateway | refs/heads/master | webapp/venv/lib/python2.7/site-packages/setuptools/tests/doctest.py | 332 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
try:
basestring
except NameError:
basestring = str
try:
enumerate
except NameError:
def enumerate(seq):
return zip(range(len(seq)),seq)
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
    # 0. Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
'is_private',
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from setuptools.compat import StringIO, execfile, func_code, func_globals, im_func
# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
__name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
flag = 1 << len(OPTIONFLAGS_BY_NAME)
OPTIONFLAGS_BY_NAME[name] = flag
return flag
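# Illustration (added comment): each registered flag occupies its own bit,
# so flags compose with | and are tested with &. A hypothetical new flag
# would claim the next free bit:
#
#     MY_FLAG = register_optionflag('MY_FLAG')
#     combined = ELLIPSIS | MY_FLAG
#     assert combined & ELLIPSIS and combined & MY_FLAG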
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
    Return true iff base begins with at least one underscore, but
    does not both begin and end with at least two underscores.
>>> is_private("a.b", "my_func")
False
>>> is_private("____", "_my_func")
True
>>> is_private("someclass", "__init__")
False
>>> is_private("sometypo", "__init_")
True
>>> is_private("x.y.z", "_")
True
>>> is_private("_x.y.z", "__")
False
>>> is_private("", "") # senseless but consistent
False
"""
warnings.warn("is_private is deprecated; it wasn't useful; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning, stacklevel=2)
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, basestring):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if want.find(ELLIPSIS_MARKER)==-1:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
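# Worked cases (added comment) showing the left-to-right anchoring above:
#
#     _ellipsis_match('a...c', 'abc')     # True: 'a' prefix, 'c' suffix
#     _ellipsis_match('a...c', 'acb')     # False: got does not end with 'c'
#     _ellipsis_match('...', 'anything')  # True: a bare ellipsis matches all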
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError('Expected a module: %r' % module)
if path.startswith('/'):
raise ValueError('Module-relative files may not have absolute paths')
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
      I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
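    # Construction sketch (added comment; DocTestParser normally builds
    # these). The constructor normalizes both fields to end with a newline:
    #
    #     ex = Example('1 + 1', '2')
    #     assert ex.source == '1 + 1\n' and ex.want == '2\n'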
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
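    # Usage sketch (added comment): extracting the single example from a
    # one-test docstring.
    #
    #     exs = DocTestParser().get_examples('>>> 1 + 1\n2\n')
    #     # len(exs) == 1; exs[0].source == '1 + 1\n'; exs[0].want == '2\n'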
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, _namefilter=None, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
# _namefilter is undocumented, and exists only for temporary backward-
# compatibility support of testmod's deprecated isprivate mess.
self._namefilter = _namefilter
def find(self, obj, name=None, module=None, globs=None,
extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
return tests
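    # Sketch (added comment): driving the finder by hand; testmod() further
    # down wraps exactly this find-then-run flow.
    #
    #     tests = DocTestFinder().find(some_module)  # hypothetical module
    #     runner = DocTestRunner()
    #     for t in tests:
    #         runner.run(t)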
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is func_globals(object)
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = im_func(getattr(obj, valname))
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = im_func(obj)
if inspect.isfunction(obj): obj = func_code(obj)
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> for test in tests:
... print runner.run(test)
(0, 2)
(0, 1)
(0, 2)
(0, 2)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec(compile(example.source, filename, "single",
compileflags, 1), test.globs)
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
elif func_code(self.save_linecache_getlines).co_argcount > 1:
return self.save_linecache_getlines(filename, module_globals)
else:
return self.save_linecache_getlines(filename)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print(len(notests), "items had no tests:")
notests.sort()
for thing in notests:
print(" ", thing)
if passed:
print(len(passed), "items passed all tests:")
passed.sort()
for thing, count in passed:
print(" %3d tests in %s" % (count, thing))
if failed:
print(self.DIVIDER)
print(len(failed), "items had failures:")
failed.sort()
for thing, (f, t) in failed:
print(" %3d of %3d in %s" % (f, t, thing))
if verbose:
print(totalt, "tests in", len(self._name2ft), "items.")
print(totalt - totalf, "passed and", totalf, "failed.")
if totalf:
print("***Test Failed***", totalf, "failures.")
elif verbose:
print("Test passed.")
return totalf, totalt
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
print("*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes.")
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
        documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
# If a line in got contains only spaces, then remove the
# spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
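    # Flag behavior sketch (added comment; a DocTestRunner normally drives
    # this method). Argument order is (want, got, optionflags):
    #
    #     checker = OutputChecker()
    #     checker.check_output('1 2\n', '1    2\n', NORMALIZE_WHITESPACE)  # True
    #     checker.check_output('a...z\n', 'abcz\n', ELLIPSIS)              # True
    #     checker.check_output('1\n', 'True\n', 0)  # True: 1/True equated
    #                                               # unless DONT_ACCEPT_TRUE_FOR_1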
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
(0, 1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__. Unless isprivate is specified, private names
are not skipped.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Deprecated in Python 2.4:
Optional keyword arg "isprivate" specifies a function used to
determine whether a name is private. The default function
treats all functions as public. Optionally, "isprivate" can be
set to doctest.is_private to skip over functions marked as private
using the underscore naming convention; see its docs for details.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if isprivate is not None:
warnings.warn("the isprivate argument is deprecated; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning)
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
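# A minimal usage sketch for testmod. `mymodule` is a hypothetical module
# name, and this would normally live in a separate driver script rather
# than in doctest itself, so it is left commented out:
#
#     import doctest
#     import mymodule
#     failures, tries = doctest.testmod(mymodule, verbose=False)
#     if failures:
#         sys.exit(1)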
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser()):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Advanced tomfoolery: testfile runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testfile is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if module_relative:
package = _normalize_module(package)
filename = _module_relative_path(package, filename)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
f = open(filename)
s = f.read()
f.close()
test = parser.get_doctest(s, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
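# A hedged sketch of testfile. The file name is illustrative; it is
# resolved relative to the calling module because module_relative
# defaults to True:
#
#     import doctest
#     failures, tries = doctest.testfile('docs/examples.txt',
#                                        optionflags=doctest.ELLIPSIS)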
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
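# Illustrative only: exercising the examples embedded in a single
# function's docstring. The function `add` is hypothetical; `globs` must
# make the tested name reachable from the examples:
#
#     def add(a, b):
#         """
#         >>> add(2, 3)
#         5
#         """
#         return a + b
#
#     run_docstring_examples(add, {'add': add}, verbose=True, name='add')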
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None,
isprivate=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.isprivate = isprivate
self.optionflags = optionflags
self.testfinder = DocTestFinder(_namefilter=isprivate)
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print("Running string", name)
(f,t) = self.testrunner.run(test)
if self.verbose:
print(f, "of", t, "examples failed in string", name)
return (f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return (f,t)
def rundict(self, d, name, module=None):
import types
m = types.ModuleType(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import types
m = types.ModuleType(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> old = _unittest_reportflags
>>> set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> import doctest
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if globs is None:
globs = module.__dict__
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
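# Typical wiring, as a sketch (`mymodule` is a stand-in for a real module
# whose docstrings carry doctests):
#
#     import unittest, doctest, mymodule
#     suite = doctest.DocTestSuite(mymodule,
#                                  optionflags=doctest.NORMALIZE_WHITESPACE)
#     unittest.TextTestRunner(verbosity=2).run(suite)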
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(), **options):
if globs is None:
globs = {}
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
if module_relative:
package = _normalize_module(package)
path = _module_relative_path(package, path)
# Find the file and read it.
name = os.path.basename(path)
f = open(path)
doc = f.read()
f.close()
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
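# Sketch with illustrative file names: several doctest files collected
# into one suite, with paths resolved relative to the calling module:
#
#     suite = DocFileSuite('api.txt', 'tutorial.txt',
#                          optionflags=ELLIPSIS | REPORT_NDIFF)
#     unittest.TextTestRunner().run(suite)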
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
return '\n'.join(output)
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
# Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print(sys.exc_info()[1])
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
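# Post-mortem sketch (names are hypothetical): drop into pdb at the point
# where a doctest in mymodule.myfunc raises:
#
#     import doctest
#     doctest.debug('mymodule', 'mymodule.myfunc', pm=True)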
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
r = unittest.TextTestRunner()
r.run(DocTestSuite())
if __name__ == "__main__":
_test()
|
hojel/calibre | refs/heads/master | src/calibre/gui2/tweak_book/__init__.py | 2 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import string
from future_builtins import map
from calibre.utils.config import JSONConfig
from calibre.spell.dictionary import Dictionaries, parse_lang_code
tprefs = JSONConfig('tweak_book_gui')
d = tprefs.defaults
d['editor_theme'] = None
d['editor_font_family'] = None
d['editor_font_size'] = 12
d['editor_line_wrap'] = True
d['editor_tab_stop_width'] = 2
d['editor_show_char_under_cursor'] = True
d['replace_entities_as_typed'] = True
d['preview_refresh_time'] = 2
d['choose_tweak_fmt'] = True
d['tweak_fmt_order'] = ['EPUB', 'AZW3']
d['update_metadata_from_calibre'] = True
d['nestable_dock_widgets'] = False
d['dock_top_left'] = 'horizontal'
d['dock_top_right'] = 'horizontal'
d['dock_bottom_left'] = 'horizontal'
d['dock_bottom_right'] = 'horizontal'
d['preview_serif_family'] = 'Liberation Serif'
d['preview_sans_family'] = 'Liberation Sans'
d['preview_mono_family'] = 'Liberation Mono'
d['preview_standard_font_family'] = 'serif'
d['preview_base_font_size'] = 18
d['preview_mono_font_size'] = 14
d['preview_minimum_font_size'] = 8
d['remove_existing_links_when_linking_sheets'] = True
d['charmap_favorites'] = list(map(ord, '\xa0\u2002\u2003\u2009\xad' '‘’“”‹›«»‚„' '—–§¶†‡©®™' '→⇒•·°±−×÷¼½½¾' '…µ¢£€¿¡¨´¸ˆ˜' 'ÀÁÂÃÄÅÆÇÈÉÊË' 'ÌÍÎÏÐÑÒÓÔÕÖØ' 'ŒŠÙÚÛÜÝŸÞßàá' 'âãäåæçèéêëìí' 'îïðñòóôõöøœš' 'ùúûüýÿþªºαΩ∞')) # noqa
d['folders_for_types'] = {'style':'styles', 'image':'images', 'font':'fonts', 'audio':'audio', 'video':'video'}
d['pretty_print_on_open'] = False
d['disable_completion_popup_for_search'] = False
d['saved_searches'] = []
d['insert_tag_mru'] = ['p', 'div', 'li', 'h1', 'h2', 'h3', 'h4', 'em', 'strong', 'td', 'tr']
d['spell_check_case_sensitive_sort'] = False
d['inline_spell_check'] = True
d['custom_themes'] = {}
d['remove_unused_classes'] = False
d['global_book_toolbar'] = [
'new-file', 'open-book', 'save-book', None, 'global-undo', 'global-redo', 'create-checkpoint', None, 'donate', 'user-manual']
d['global_tools_toolbar'] = [
'check-book', 'spell-check-book', 'edit-toc', 'insert-character',
'manage-fonts', 'smarten-punctuation', 'remove-unused-css', 'show-reports'
]
d['global_plugins_toolbar'] = []
d['editor_common_toolbar'] = [('editor-' + x) if x else None for x in ('undo', 'redo', None, 'cut', 'copy', 'paste')]
d['editor_css_toolbar'] = ['pretty-current', 'insert-image']
d['editor_xml_toolbar'] = ['pretty-current', 'insert-tag']
d['editor_html_toolbar'] = ['fix-html-current', 'pretty-current', 'insert-image', 'insert-hyperlink', 'insert-tag', 'change-paragraph']
d['editor_format_toolbar'] = [('format-text-' + x) if x else x for x in (
'bold', 'italic', 'underline', 'strikethrough', 'subscript', 'superscript',
None, 'color', 'background-color', None, 'justify-left', 'justify-center',
'justify-right', 'justify-fill')]
d['spell_check_case_sensitive_search'] = False
d['add_cover_preserve_aspect_ratio'] = False
d['templates'] = {}
d['auto_close_tags'] = True
d['restore_book_state'] = True
d['editor_accepts_drops'] = True
del d
ucase_map = {l:string.ascii_uppercase[i] for i, l in enumerate(string.ascii_lowercase)}
def capitalize(x):
return ucase_map[x[0]] + x[1:]
_current_container = None
def current_container():
return _current_container
def set_current_container(container):
global _current_container
_current_container = container
class NonReplaceDict(dict):
def __setitem__(self, k, v):
if k in self:
raise ValueError('The key %s is already present' % k)
dict.__setitem__(self, k, v)
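# Illustrative sketch (not part of calibre itself) of the write-once
# semantics NonReplaceDict enforces for the registries below:
#
#     reg = NonReplaceDict()
#     reg['open-book'] = object()
#     reg['open-book'] = object()  # raises ValueError: key already present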
actions = NonReplaceDict()
editors = NonReplaceDict()
toolbar_actions = NonReplaceDict()
editor_toolbar_actions = {
'format':NonReplaceDict(), 'html':NonReplaceDict(), 'xml':NonReplaceDict(), 'css':NonReplaceDict()}
TOP = object()
dictionaries = Dictionaries()
def editor_name(editor):
for n, ed in editors.iteritems():
if ed is editor:
return n
def set_book_locale(lang):
dictionaries.initialize()
try:
dictionaries.default_locale = parse_lang_code(lang)
if dictionaries.default_locale.langcode == 'und':
raise ValueError('')
except ValueError:
dictionaries.default_locale = dictionaries.ui_locale
from calibre.gui2.tweak_book.editor.syntax.html import refresh_spell_check_status
refresh_spell_check_status()
def verify_link(url, name=None):
if _current_container is None or name is None:
return None
try:
target = _current_container.href_to_name(url, name)
except ValueError:
return False # Absolute URLs that point to a different drive on windows cause this
if _current_container.has_name(target):
return True
if url.startswith('#'):
return True
if url.partition(':')[0] in {'http', 'https', 'mailto'}:
return True
return False
def update_mark_text_action(ed=None):
has_mark = False
if ed is not None and ed.has_line_numbers:
has_mark = bool(ed.selected_text) or not ed.has_marked_text
ac = actions['mark-selected-text']
ac.setText(ac.default_text if has_mark else _('Unmark marked text'))
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/mobile/shared_gualama.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_gualama.iff"
result.attribute_template_id = 9
result.stfName("monster_name","gualama")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
Bitergia/allura | refs/heads/master | scripts/migrations/019-set-parent-only-troves.py | 3 | import logging
from tg import config
from ming.orm import ThreadLocalORMSession
import sfx
from allura import model as M
from allura.lib import helpers as h
from sfx.model import tables as T
log = logging.getLogger(__name__)
def main():
sfx.middleware.configure_databases(h.config_with_prefix(config, 'sfx.'))
parent_only_troves = T.trove_cat.select(T.trove_cat.c.parent_only==1).execute()
parent_only_ids = [t.trove_cat_id for t in parent_only_troves]
allura_troves = M.TroveCategory.query.find(dict(
trove_cat_id={'$in': parent_only_ids})).all()
print 'Found %s parent-only troves in alexandria.' % len(parent_only_ids)
print 'Setting parent-only Allura troves...'
for t in allura_troves:
print ' %s: %s' % (t.trove_cat_id, t.fullpath)
t.parent_only = True
print 'Updated %s Allura troves.' % len(allura_troves)
ThreadLocalORMSession.flush_all()
if __name__ == '__main__':
main()
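# Usage sketch (the ini path is deployment-specific and illustrative):
# Allura migration scripts are normally run through paster so the app
# config is loaded first, e.g.
#
#     paster script production.ini scripts/migrations/019-set-parent-only-troves.py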
|
yajnab/android_kernel_htc_golfu | refs/heads/ics | tools/perf/scripts/python/sched-migration.py | 11215 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
# origin_tostring() is not defined anywhere in this script; include the
# last event instead so repr() cannot raise AttributeError.
ret = self.tasks.__repr__()
ret += " " + self.event.__repr__()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
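# Usage sketch (standard perf tooling; the exact event list and duration
# are illustrative): record scheduler events system-wide, then replay the
# trace through this script to get the migration overview GUI:
#
#     perf record -a -e sched:sched_switch -e sched:sched_migrate_task \
#                 -e sched:sched_wakeup -e sched:sched_wakeup_new -- sleep 10
#     perf script -s scripts/python/sched-migration.py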
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|