repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
beeverycreative/BEEweb | refs/heads/master | src/octoprint/server/util/connection_util.py | 1 | # coding=utf-8
import logging
import threading
from time import sleep
from beedriver.connection import Conn as BeePrinterConn
from octoprint.settings import settings
class ConnectionMonitorThread(threading.Thread):

    def __init__(self, connection_callback):
        """
        Thread that polls the USB ports for a connected BVC printer.

        :param connection_callback: function invoked once a printer is
            detected; a truthy return value terminates the monitor loop
        :return:
        """
        super(ConnectionMonitorThread, self).__init__()
        self.USB_POLL_INTERVAL = 1  # seconds between USB polls
        self._printerConnIntf = BeePrinterConn(
            dummyPlug=settings().getBoolean(["usb", "dummyPrinter"]))
        self._logger = logging.getLogger()
        self._printer_detected_msg_logged = False
        self._controlFlag = True
        self._connection_callback = connection_callback

    def stop_connection_monitor(self):
        """Ask the monitor loop to exit after its current iteration."""
        self._controlFlag = False
        self._logger.info("BVC Printer connection monitor stopped.")

    def run(self):
        """Poll for printers until stopped or the callback reports success."""
        self._logger.info("Starting BVC Printer connection monitor...")
        while self._controlFlag:
            detected = self._printerConnIntf.getPrinterList()
            if detected:  # at least one printer found
                # Log the detection message only once per monitor lifetime.
                if not self._printer_detected_msg_logged:
                    self._logger.info(
                        "BVC Printer detected. "
                        "Waiting for client connection...")
                    self._printer_detected_msg_logged = True
                if self._connection_callback():
                    return
            sleep(self.USB_POLL_INTERVAL)
|
bmya/odoo-argentina | refs/heads/11.0 | l10n_ar_account/report/account_ar_vat_line.py | 1 | from odoo import tools, models, fields, api, _
# from ast import literal_eval
class AccountArVatLine(models.Model):
    """
    Base model for new Argentinian VAT reports. The idea is for these lines
    to carry all the data they need so that, when Odoo changes, those
    changes are absorbed by this cube and no modifications are required in
    the reports that use these lines.
    One line is generated for each journal item affected by VAT.
    Basically, it converts journal items into columns according to their
    tax information, and also adds a few other fields.
    """

    _name = "account.ar.vat.line"
    _description = "Línea de IVA para análisis en localización argentina"
    # Backed by the SQL view created in init(), not a regular table.
    _auto = False

    document_type_id = fields.Many2one(
        'account.document.type',
        'Document Type',
        readonly=True
    )
    date = fields.Date(
        readonly=True
    )
    # TODO: consider making this simply a related field with store=True;
    # we did not, because of possible performance concerns.
    comprobante = fields.Selection([
        ('out_invoice', 'Customer Invoice'),
        ('in_invoice', 'Vendor Bill'),
        ('out_refund', 'Customer Refund'),
        ('in_refund', 'Vendor Refund'),
    ],
        readonly=True,
    )
    ref = fields.Char(
        'Partner Reference',
        readonly=True
    )
    name = fields.Char(
        'Label',
        readonly=True
    )
    # Taxed-base / VAT-amount column pairs, one per Argentinian VAT rate.
    # All amounts are expressed in the company currency.
    base_21 = fields.Monetary(
        readonly=True,
        string='Grav. 21%',
        currency_field='company_currency_id',
    )
    iva_21 = fields.Monetary(
        readonly=True,
        string='IVA 21%',
        currency_field='company_currency_id',
    )
    base_27 = fields.Monetary(
        readonly=True,
        string='Grav. 27%',
        currency_field='company_currency_id',
    )
    iva_27 = fields.Monetary(
        readonly=True,
        string='IVA 27%',
        currency_field='company_currency_id',
    )
    base_10 = fields.Monetary(
        readonly=True,
        string='Grav. 10,5%',
        currency_field='company_currency_id',
    )
    iva_10 = fields.Monetary(
        readonly=True,
        string='IVA 10,5%',
        currency_field='company_currency_id',
    )
    base_25 = fields.Monetary(
        readonly=True,
        string='Grav. 2,5%',
        currency_field='company_currency_id',
    )
    iva_25 = fields.Monetary(
        readonly=True,
        string='IVA 2,5%',
        currency_field='company_currency_id',
    )
    base_5 = fields.Monetary(
        readonly=True,
        string='Grav. 5%',
        currency_field='company_currency_id',
    )
    iva_5 = fields.Monetary(
        readonly=True,
        string='IVA 5%',
        currency_field='company_currency_id',
    )
    per_iva = fields.Monetary(
        readonly=True,
        string='Perc. IVA',
        help='Percepción de IVA',
        currency_field='company_currency_id',
    )
    no_gravado_iva = fields.Monetary(
        readonly=True,
        string='No grav/ex',
        help='No gravado / Exento.\n'
        'Todo lo que tenga iva 0, exento, no gravado o no corresponde',
        currency_field='company_currency_id',
    )
    otros_impuestos = fields.Monetary(
        readonly=True,
        string='Otr. Imp',
        help='Otros Impuestos. Todos los impuestos otros impuestos que no sean'
        ' ni iva ni perc de iibb y que figuren en comprobantes afectados por '
        'IVA',
        currency_field='company_currency_id',
    )
    total = fields.Monetary(
        readonly=True,
        currency_field='company_currency_id',
    )
    # currency_id = fields.Many2one(
    #     'res.currency',
    #     'Currency',
    #     readonly=True
    # )
    # amount_currency = fields.Monetary(
    #     readonly=True,
    #     currency_field='currency_id',
    # TODO: same as above, maybe a related field with store=True? Performance?
    state = fields.Selection(
        [('draft', 'Unposted'), ('posted', 'Posted')],
        'Status',
        readonly=True
    )
    journal_id = fields.Many2one(
        'account.journal',
        'Journal',
        readonly=True,
        auto_join=True,
    )
    partner_id = fields.Many2one(
        'res.partner',
        'Partner',
        readonly=True,
        auto_join=True,
    )
    # TODO: same as above, maybe a related field with store=True? Performance?
    afip_responsability_type_id = fields.Many2one(
        'afip.responsability.type',
        string='AFIP Responsability Type',
        readonly=True,
        auto_join=True,
    )
    company_id = fields.Many2one(
        'res.company',
        'Company',
        readonly=True,
        auto_join=True,
    )
    company_currency_id = fields.Many2one(
        related='company_id.currency_id',
        readonly=True,
    )
    move_id = fields.Many2one(
        'account.move',
        string='Entry',
        auto_join=True,
    )

    @api.multi
    def open_journal_entry(self):
        """Open the journal entry behind this VAT line in a form view.

        The SQL view selects ``am.id`` as this model's id, so ``self.id``
        can be used directly as the account.move res_id.
        """
        self.ensure_one()
        return {
            'name': _('Journal Entry'),
            'target': 'current',
            'res_id': self.id,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'account.move',
            'type': 'ir.actions.act_window',
        }

    @api.model_cr
    def init(self):
        """(Re)create the SQL view that backs this model."""
        cr = self.env.cr
        tools.drop_view_if_exists(cr, self._table)
        # Superuser environment (uid 1) so the tax-group xml ids resolve
        # regardless of the current user's access rights.
        env = api.Environment(cr, 1, {})
        ref = env.ref
        # TODO: maybe check that every line has a base vat tax or is itself
        # a tax, since that is mandatory in the Argentinian localization and
        # also guarantees the computation is correct
        # TODO: maybe we also want to check that there are no vat-type tax
        # groups missing from the refs below
        vat_tax_groups = env['account.tax.group'].search(
            [('tax', '=', 'vat')])
        tg_nc = ref('l10n_ar_account.tax_group_iva_no_corresponde', False)
        tg_ng = ref('l10n_ar_account.tax_group_iva_no_gravado', False)
        tg_ex = ref('l10n_ar_account.tax_group_iva_exento', False)
        tg_0 = ref('l10n_ar_account.tax_group_iva_0', False)
        tg_21 = ref('l10n_ar_account.tax_group_iva_21', False)
        tg_10 = ref('l10n_ar_account.tax_group_iva_10', False)
        tg_27 = ref('l10n_ar_account.tax_group_iva_27', False)
        tg_25 = ref('l10n_ar_account.tax_group_iva_25', False)
        tg_5 = ref('l10n_ar_account.tax_group_iva_5', False)
        tg_iva0 = False
        if tg_nc and tg_ng and tg_ex and tg_0:
            # All "zero VAT" groups collapse into one bucket (no_gravado_iva).
            tg_iva0 = tg_nc + tg_ng + tg_ex + tg_0
        tg_per_iva = ref('l10n_ar_account.tax_group_percepcion_iva', False)
        # TODO: see whether in future versions we use labels or something
        # more "odoo way" instead of tax groups and external ids
        # if external ids not loaded yet, we load a dummy id 0
        vals = {
            'tg21': tg_21 and tg_21.id or 0,
            'tg10': tg_10 and tg_10.id or 0,
            'tg27': tg_27 and tg_27.id or 0,
            'tg25': tg_25 and tg_25.id or 0,
            'tg5': tg_5 and tg_5.id or 0,
            'tg_per_iva': tg_per_iva and tg_per_iva.id or 0,
            # tuple [0, 0] so we dont have error on sql
            'tg_iva0': tuple(tg_iva0 and tg_iva0.ids or [0, 0]),
            'tg_vats': tuple(vat_tax_groups and vat_tax_groups.ids or [0, 0]),
        }
        # One row per account.move; bases come from the taxes applied to a
        # line (bt), tax amounts from the tax the line itself represents (nt).
        query = """
            SELECT
                am.id,
                am.id as move_id,
                am.date,
                am.journal_id,
                am.company_id,
                am.partner_id,
                am.name,
                am.ref,
                am.afip_responsability_type_id,
                am.state,
                am.document_type_id,
                /*TODO si agregamos recibos entonces tenemos que mapear valores aca*/
                ai.type as comprobante,
                sum(CASE WHEN bt.tax_group_id=%(tg21)s THEN aml.balance ELSE 0 END)
                    as base_21,
                sum(CASE WHEN nt.tax_group_id=%(tg21)s THEN aml.balance ELSE 0 END)
                    as iva_21,
                sum(CASE WHEN bt.tax_group_id=%(tg10)s THEN aml.balance ELSE 0 END)
                    as base_10,
                sum(CASE WHEN nt.tax_group_id=%(tg10)s THEN aml.balance ELSE 0 END)
                    as iva_10,
                sum(CASE WHEN bt.tax_group_id=%(tg27)s THEN aml.balance ELSE 0 END)
                    as base_27,
                sum(CASE WHEN nt.tax_group_id=%(tg27)s THEN aml.balance ELSE 0 END)
                    as iva_27,
                sum(CASE WHEN bt.tax_group_id=%(tg25)s THEN aml.balance ELSE 0 END)
                    as base_25,
                sum(CASE WHEN nt.tax_group_id=%(tg25)s THEN aml.balance ELSE 0 END)
                    as iva_25,
                sum(CASE WHEN bt.tax_group_id=%(tg5)s THEN aml.balance ELSE 0 END)
                    as base_5,
                sum(CASE WHEN nt.tax_group_id=%(tg5)s THEN aml.balance ELSE 0 END)
                    as iva_5,
                --TODO separar sufido y aplicado o filtrar por tipo de operacion o algo?
                sum(CASE WHEN nt.tax_group_id=%(tg_per_iva)s THEN aml.balance ELSE 0 END)
                    as per_iva,
                sum(CASE WHEN bt.tax_group_id in %(tg_iva0)s THEN aml.balance ELSE 0 END)
                    as no_gravado_iva,
                sum(CASE WHEN nt.tax_group_id not in %(tg_vats)s THEN aml.balance ELSE
                    0 END) as otros_impuestos,
                sum(aml.balance) as total
            FROM
                account_move_line aml
            LEFT JOIN
                account_move as am
                ON aml.move_id = am.id
            LEFT JOIN
                account_invoice as ai
                ON aml.invoice_id = ai.id
            LEFT JOIN
                account_account AS aa
                ON aml.account_id = aa.id
            LEFT JOIN
                -- nt = net tax
                account_tax AS nt
                ON aml.tax_line_id = nt.id
            LEFT JOIN
                account_move_line_account_tax_rel AS amltr
                ON aml.id = amltr.account_move_line_id
            LEFT JOIN
                -- bt = base tax
                account_tax AS bt
                ON amltr.account_tax_id = bt.id
            WHERE
                aa.internal_type not in ('payable', 'receivable', 'liquidity')
            GROUP BY
                am.id, am.state, am.document_type_id,
                am.afip_responsability_type_id,
                ai.type
        """ % vals
        sql = """CREATE or REPLACE VIEW %s as (%s)""" % (self._table, query)
        cr.execute(sql)
|
plotly/plotly.py | refs/heads/master | packages/python/plotly/plotly/validators/scattergeo/marker/line/_colorscale.py | 1 | import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Colorscale validator for the scattergeo.marker.line trace property."""

    def __init__(
        self, plotly_name="colorscale", parent_name="scattergeo.marker.line", **kwargs
    ):
        # Fill in the per-property defaults; explicit kwargs win over them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("implied_edits", {"autocolorscale": False})
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
sergio-incaser/odoo | refs/heads/8.0 | addons/association/__openerp__.py | 260 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Associations Management',
'version': '0.1',
'category': 'Specific Industry Applications',
'description': """
This module is to configure modules related to an association.
==============================================================
It installs the profile for associations to manage events, registrations, memberships,
membership products (schemes).
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'membership', 'event'],
'data': ['security/ir.model.access.csv', 'profile_association.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'website': 'https://www.odoo.com'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Feshchenkostanislav/drupal7 | refs/heads/master | sites/all/themes/myomega/node_modules/node-gyp/gyp/buildbot/buildbot_run.py | 1467 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import os
import shutil
import subprocess
import sys
# Directory layout used by the buildbot steps (this file lives in
# <root>/gyp/buildbot/, so TRUNK_DIR is the gyp checkout itself).
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')      # CMake source checkout
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')   # bootstrapped CMake binaries
OUT_DIR = os.path.join(TRUNK_DIR, 'out')         # gyp test output directory
def CallSubProcess(*args, **kwargs):
  """Wrapper around subprocess.call which treats errors as build exceptions."""
  # Feed the child from /dev/null so it can never block waiting on stdin.
  with open(os.devnull) as devnull_fd:
    retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
  if retcode != 0:
    # Buildbot annotator tag: marks this step as an infrastructure exception
    # and aborts the whole run.
    print '@@@STEP_EXCEPTION@@@'
    sys.exit(1)
def PrepareCmake():
  """Build CMake 2.8.8 since the version in Precise is 2.8.7."""
  # A clobber build wipes the previous checkout first.
  if os.environ['BUILDBOT_CLOBBER'] == '1':
    print '@@@BUILD_STEP Clobber CMake checkout@@@'
    shutil.rmtree(CMAKE_DIR)

  # We always build CMake 2.8.8, so no need to do anything
  # if the directory already exists.
  if os.path.isdir(CMAKE_DIR):
    return

  print '@@@BUILD_STEP Initialize CMake checkout@@@'
  os.mkdir(CMAKE_DIR)

  print '@@@BUILD_STEP Sync CMake@@@'
  # Shallow, single-branch clone of the pinned v2.8.8 tag.
  CallSubProcess(
      ['git', 'clone',
       '--depth', '1',
       '--single-branch',
       '--branch', 'v2.8.8',
       '--',
       'git://cmake.org/cmake.git',
       CMAKE_DIR],
      cwd=CMAKE_DIR)

  print '@@@BUILD_STEP Build CMake@@@'
  CallSubProcess(
      ['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
      cwd=CMAKE_DIR)
  CallSubProcess(['make', 'cmake'], cwd=CMAKE_DIR)
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
0 for sucesss, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
  """Run the per-platform set of gyp format tests, accumulating failures."""
  # Dump out/ directory.
  print '@@@BUILD_STEP cleanup@@@'
  print 'Removing %s...' % OUT_DIR
  shutil.rmtree(OUT_DIR, ignore_errors=True)
  print 'Done.'

  # Each GypTestFormat call contributes 0 (pass) or 1 (fail) to retcode.
  retcode = 0
  if sys.platform.startswith('linux'):
    retcode += GypTestFormat('ninja')
    retcode += GypTestFormat('make')
    PrepareCmake()
    retcode += GypTestFormat('cmake')
  elif sys.platform == 'darwin':
    retcode += GypTestFormat('ninja')
    retcode += GypTestFormat('xcode')
    retcode += GypTestFormat('make')
  elif sys.platform == 'win32':
    retcode += GypTestFormat('ninja')
    # The win64 builder additionally runs a small MSVS subset.
    if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
      retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
                               msvs_version='2013',
                               tests=[
                                   r'test\generator-output\gyptest-actions.py',
                                   r'test\generator-output\gyptest-relocate.py',
                                   r'test\generator-output\gyptest-rules.py'])
      retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
  else:
    raise Exception('Unknown platform')
  if retcode:
    # TODO(bradnelson): once the annotator supports a postscript (section for
    # after the build proper that could be used for cumulative failures),
    # use that instead of this. This isolates the final return value so
    # that it isn't misattributed to the last stage.
    print '@@@BUILD_STEP failures@@@'
    sys.exit(retcode)
# Script entry point: run the full buildbot sequence.
if __name__ == '__main__':
  GypBuild()
|
darren-wang/gl | refs/heads/master | glance/db/sqlalchemy/metadef_api/namespace.py | 6 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy.utils import paginate_query
from oslo_log import log as logging
import sqlalchemy.exc as sa_exc
from sqlalchemy import or_
import sqlalchemy.orm as sa_orm
from glance.common import exception as exc
import glance.db.sqlalchemy.metadef_api as metadef_api
from glance.db.sqlalchemy import models_metadef as models
from glance import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LW = i18n._LW
def _is_namespace_visible(context, namespace, status=None):
    """Return True if the namespace is visible in this context."""
    # Admins see everything.
    if context.is_admin:
        return True

    # Ownerless namespaces are visible to everyone.
    if namespace['owner'] is None:
        return True

    # Public namespaces are visible to everyone.
    if namespace.get('visibility') == 'public':
        return True

    # Otherwise only the owner may see it; anything else is private.
    return context.owner is not None and context.owner == namespace['owner']
def _select_namespaces_query(context, session):
    """Build the query to get all namespaces based on the context.

    Admins get an unfiltered query; regular users get public namespaces,
    plus their own private ones when the context has an owner.
    """
    LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s" %
              {'is_admin': context.is_admin, 'owner': context.owner})

    # If admin, return everything.
    query_ns = session.query(models.MetadefNamespace)
    if context.is_admin:
        return query_ns
    else:
        # If regular user, return only public namespaces.
        # However, if context.owner has a value, return both
        # public and private namespaces of the context.owner.
        if context.owner is not None:
            query = (
                query_ns.filter(
                    or_(models.MetadefNamespace.owner == context.owner,
                        models.MetadefNamespace.visibility == 'public')))
        else:
            query = query_ns.filter(
                models.MetadefNamespace.visibility == 'public')
        return query
def _get(context, namespace_id, session):
    """Get a namespace by id, raise if not found.

    Raises MetadefNamespaceNotFound when no row matches and
    MetadefForbidden when the caller may not view the namespace.
    """
    try:
        query = session.query(models.MetadefNamespace)\
            .filter_by(id=namespace_id)
        namespace_rec = query.one()
    except sa_orm.exc.NoResultFound:
        msg = (_("Metadata definition namespace not found for id=%s")
               % namespace_id)
        LOG.warn(msg)
        raise exc.MetadefNamespaceNotFound(msg)

    # Make sure they are allowed to view it.
    if not _is_namespace_visible(context, namespace_rec.to_dict()):
        msg = ("Forbidding request, metadata definition namespace=%s"
               " is not visible.") % namespace_rec.namespace
        LOG.debug(msg)
        # Translated copy of the message for the user-facing exception.
        emsg = _("Forbidding request, metadata definition namespace=%s"
                 " is not visible.") % namespace_rec.namespace
        raise exc.MetadefForbidden(emsg)

    return namespace_rec
def _get_by_name(context, name, session):
    """Look up a namespace record by name.

    Raises MetadefNamespaceNotFound when no row matches and
    MetadefForbidden when the caller may not view the namespace.
    """
    query = session.query(models.MetadefNamespace).filter_by(namespace=name)
    try:
        namespace_rec = query.one()
    except sa_orm.exc.NoResultFound:
        LOG.debug("Metadata definition namespace=%s was not found." % name)
        raise exc.MetadefNamespaceNotFound(namespace_name=name)

    # Visibility check: hide namespaces this context may not view.
    if not _is_namespace_visible(context, namespace_rec.to_dict()):
        LOG.debug("Forbidding request, metadata definition namespace=%s"
                  " is not visible." % name)
        emsg = _("Forbidding request, metadata definition namespace=%s"
                 " is not visible.") % name
        raise exc.MetadefForbidden(emsg)

    return namespace_rec
def _get_all(context, session, filters=None, marker=None,
             limit=None, sort_key='created_at', sort_dir='desc'):
    """Get all namespaces that match zero or more filters.

    :param filters: dict of filter keys and values.
    :param marker: namespace id after which to start page
    :param limit: maximum number of namespaces to return
    :param sort_key: namespace attribute by which results should be sorted
    :param sort_dir: direction in which results should be sorted (asc, desc)
    """
    filters = filters or {}
    query = _select_namespaces_query(context, session)

    # if visibility filter, apply it to the context based query
    visibility = filters.pop('visibility', None)
    if visibility is not None:
        query = query.filter(models.MetadefNamespace.visibility == visibility)

    # if id_list filter, apply it to the context based query
    id_list = filters.pop('id_list', None)
    if id_list is not None:
        query = query.filter(models.MetadefNamespace.id.in_(id_list))

    marker_namespace = None
    if marker is not None:
        # Resolve the marker id to a record (also enforces visibility).
        marker_namespace = _get(context, marker, session)

    # Always include created_at/id as tie-breaker sort keys; put the
    # caller's key first when it is not already one of them.
    # (Was a conditional expression used as a statement; now a plain if.)
    sort_keys = ['created_at', 'id']
    if sort_key not in sort_keys:
        sort_keys.insert(0, sort_key)

    query = paginate_query(query=query,
                           model=models.MetadefNamespace,
                           limit=limit,
                           sort_keys=sort_keys,
                           marker=marker_namespace, sort_dir=sort_dir)

    return query.all()
def _get_all_by_resource_types(context, session, filters, marker=None,
                               limit=None, sort_key=None, sort_dir=None):
    """Get all visible namespaces for the specified resource_types.

    ``filters['resource_types']`` is a comma-separated list of resource
    type names; the matching namespace ids are fed back into _get_all via
    the id_list filter.
    """
    resource_types = filters['resource_types']
    resource_type_list = resource_types.split(',')

    db_recs = (
        session.query(models.MetadefResourceType)
        .join(models.MetadefResourceType.associations)
        .filter(models.MetadefResourceType.name.in_(resource_type_list))
        .values(models.MetadefResourceType.name,
                models.MetadefNamespaceResourceType.namespace_id)
    )

    namespace_id_list = [namespace_id for _name, namespace_id in db_recs]

    # Fixed: was `len(namespace_id_list) is 0`, which compares an int by
    # identity (a CPython implementation detail, not guaranteed semantics).
    if not namespace_id_list:
        return []

    filters2 = filters
    filters2.update({'id_list': namespace_id_list})

    return _get_all(context, session, filters2,
                    marker, limit, sort_key, sort_dir)
def get_all(context, session, marker=None, limit=None,
            sort_key=None, sort_dir=None, filters=None):
    """List all visible namespaces as dicts."""
    filters = filters or {}
    # Dispatch on whether a resource_types filter was supplied.
    if 'resource_types' in filters:
        fetch = _get_all_by_resource_types
    else:
        fetch = _get_all
    namespaces = fetch(context, session, filters, marker, limit,
                       sort_key, sort_dir)
    return map(lambda ns: ns.to_dict(), namespaces)
def get(context, name, session):
    """Return the namespace named *name* as a dict, raise if not found."""
    return _get_by_name(context, name, session).to_dict()
def create(context, values, session):
    """Create a namespace, raise if namespace already exists."""
    namespace_name = values['namespace']
    namespace = models.MetadefNamespace()
    # Strip protected attributes that callers may not set directly.
    metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values)
    namespace.update(values.copy())
    try:
        namespace.save(session=session)
    except db_exc.DBDuplicateEntry:
        # Namespace names are unique; surface duplicates as a domain error.
        msg = ("Can not create the metadata definition namespace."
               " Namespace=%s already exists.") % namespace_name
        LOG.debug(msg)
        raise exc.MetadefDuplicateNamespace(
            namespace_name=namespace_name)

    return namespace.to_dict()
def update(context, namespace_id, values, session):
    """Update a namespace, raise if not found/visible or duplicate result"""
    namespace_rec = _get(context, namespace_id, session)
    # Strip protected attributes that callers may not set directly.
    metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values)

    try:
        namespace_rec.update(values.copy())
        namespace_rec.save(session=session)
    except db_exc.DBDuplicateEntry:
        # Renaming onto an existing namespace name violates uniqueness.
        msg = ("Invalid update. It would result in a duplicate"
               " metadata definition namespace with the same name of %s"
               % values['namespace'])
        LOG.debug(msg)
        emsg = (_("Invalid update. It would result in a duplicate"
                  " metadata definition namespace with the same name of %s")
                % values['namespace'])
        raise exc.MetadefDuplicateNamespace(emsg)

    return namespace_rec.to_dict()
def delete(context, name, session):
    """Delete a namespace; raise if not found, referenced, or not visible."""
    namespace_rec = _get_by_name(context, name, session)
    try:
        session.delete(namespace_rec)
        session.flush()
    except db_exc.DBError as e:
        if isinstance(e.inner_exception, sa_exc.IntegrityError):
            # Foreign-key violation: other metadef records still point here.
            msg = ("Metadata definition namespace=%s not deleted."
                   " Other records still refer to it." % name)
            LOG.debug(msg)
            raise exc.MetadefIntegrityError(
                record_type='namespace', record_name=name)
        else:
            raise e

    return namespace_rec.to_dict()
def delete_cascade(context, name, session):
    """Delete a namespace and all its contents in one transaction.

    Removes the namespace's tags, objects, properties and resource-type
    associations before deleting the namespace record itself. Raises if
    the namespace is not found, still referenced, or not visible.
    """
    namespace_rec = _get_by_name(context, name, session)
    with session.begin():
        try:
            # Delete dependent content first so the namespace row can go.
            metadef_api.tag.delete_namespace_content(
                context, namespace_rec.id, session)
            metadef_api.object.delete_namespace_content(
                context, namespace_rec.id, session)
            metadef_api.property.delete_namespace_content(
                context, namespace_rec.id, session)
            metadef_api.resource_type_association.delete_namespace_content(
                context, namespace_rec.id, session)
            session.delete(namespace_rec)
            session.flush()
        except db_exc.DBError as e:
            if isinstance(e.inner_exception, sa_exc.IntegrityError):
                # Foreign-key violation: other records still point here.
                msg = ("Metadata definition namespace=%s not deleted."
                       " Other records still refer to it." % name)
                LOG.debug(msg)
                raise exc.MetadefIntegrityError(
                    record_type='namespace', record_name=name)
            else:
                raise e

    return namespace_rec.to_dict()
|
andymckay/zamboni | refs/heads/master | migrations/654-award-theme-points.py | 113 | #!/usr/bin/env python
def run():
    """No-op migration entry point: nothing left to do."""
    return None
|
oandrew/home-assistant | refs/heads/dev | tests/components/camera/test_generic.py | 13 | """The tests for generic camera component."""
import asyncio
from unittest import mock
from homeassistant.bootstrap import setup_component
@asyncio.coroutine
def test_fetching_url(aioclient_mock, hass, test_client):
    """Test that it fetches the given url."""
    aioclient_mock.get('http://example.com', text='hello world')

    def setup_platform():
        """Setup the platform."""
        assert setup_component(hass, 'camera', {
            'camera': {
                'name': 'config_test',
                'platform': 'generic',
                'still_image_url': 'http://example.com',
                'username': 'user',
                'password': 'pass'
            }})

    # Component setup is blocking, so run it in the executor thread.
    yield from hass.loop.run_in_executor(None, setup_platform)

    client = yield from test_client(hass.http.app)

    resp = yield from client.get('/api/camera_proxy/camera.config_test')

    assert resp.status == 200
    # One upstream fetch per proxy request (no refetch limiting configured).
    assert aioclient_mock.call_count == 1
    body = yield from resp.text()
    assert body == 'hello world'

    resp = yield from client.get('/api/camera_proxy/camera.config_test')
    assert aioclient_mock.call_count == 2
@asyncio.coroutine
def test_limit_refetch(aioclient_mock, hass, test_client):
    """Test that it fetches the given url."""
    # The still_image_url template below appends "a" to sensor.temp's state.
    aioclient_mock.get('http://example.com/5a', text='hello world')
    aioclient_mock.get('http://example.com/10a', text='hello world')
    aioclient_mock.get('http://example.com/15a', text='hello planet')
    aioclient_mock.get('http://example.com/20a', status=404)

    def setup_platform():
        """Setup the platform."""
        assert setup_component(hass, 'camera', {
            'camera': {
                'name': 'config_test',
                'platform': 'generic',
                'still_image_url':
                'http://example.com/{{ states.sensor.temp.state + "a" }}',
                'limit_refetch_to_url_change': True,
            }})

    # Component setup is blocking, so run it in the executor thread.
    yield from hass.loop.run_in_executor(None, setup_platform)

    client = yield from test_client(hass.http.app)

    resp = yield from client.get('/api/camera_proxy/camera.config_test')

    hass.states.async_set('sensor.temp', '5')

    # Simulated fetch timeout: proxy answers 500 without an upstream call.
    with mock.patch('async_timeout.timeout',
                    side_effect=asyncio.TimeoutError()):
        resp = yield from client.get('/api/camera_proxy/camera.config_test')
        assert aioclient_mock.call_count == 0
        assert resp.status == 500

    hass.states.async_set('sensor.temp', '10')

    resp = yield from client.get('/api/camera_proxy/camera.config_test')
    assert aioclient_mock.call_count == 1
    assert resp.status == 200
    body = yield from resp.text()
    assert body == 'hello world'

    # Same URL again: limit_refetch_to_url_change suppresses the refetch.
    resp = yield from client.get('/api/camera_proxy/camera.config_test')
    assert aioclient_mock.call_count == 1
    assert resp.status == 200
    body = yield from resp.text()
    assert body == 'hello world'

    hass.states.async_set('sensor.temp', '15')

    # Url change = fetch new image
    resp = yield from client.get('/api/camera_proxy/camera.config_test')
    assert aioclient_mock.call_count == 2
    assert resp.status == 200
    body = yield from resp.text()
    assert body == 'hello planet'

    # Cause a template render error
    hass.states.async_remove('sensor.temp')
    resp = yield from client.get('/api/camera_proxy/camera.config_test')
    # No new fetch; the previously cached image is served.
    assert aioclient_mock.call_count == 2
    assert resp.status == 200
    body = yield from resp.text()
    assert body == 'hello planet'
|
RuiShu/bcde | refs/heads/master | config.py | 1 | import argparse
import sys
# Command-line configuration for the BCDE experiments. Parsed at import
# time; other modules read the resulting `args` object.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

# General settings
parser.add_argument("--run", type=int, default=0, help="Run index. Use 0 if first run.")
parser.add_argument("--n-save", type=int, default=50, help="Number of epochs before save.")
parser.add_argument("--save-dir", type=str, default='/scratch/users/rshu15/bcde',
                    help="Save model directory.")

# Data settings
parser.add_argument("--task", type=str, default='q2', help="Data task")
parser.add_argument("--shift", type=str, default='none', help="Whether to shift data")
parser.add_argument("--n-label", type=int, default=25000, help="Number of labeled data.")
parser.add_argument("--n-total", type=int, default=50000, help="Total number of data.")
parser.add_argument('--seed', type=int, default=0, help='Seed for semi-sup conversion')

# Model settings
parser.add_argument('--model', type=str, default='hybrid', choices=['hybrid', 'hybrid_factored', 'conditional', 'pretrained'],
                    help='Type of model')
parser.add_argument("--z-size", type=int, default=50, help="Size of z.")
parser.add_argument("--h-size", type=int, default=500, help="Size of h.")
parser.add_argument("--nonlin", type=str, default='elu', help="Activation function.")
parser.add_argument("--eps", type=float, default=1e-5, help="Distribution epsilon.")
parser.add_argument("--l2", type=float, default=0.01, help="L2 weight to use (if !NA)")

# Optimization settings
parser.add_argument("--n-epochs", type=int, default=300, help="Number of epochs.")
parser.add_argument("--bs", type=int, default=100, help="Minibatch size.")
parser.add_argument("--lr", type=float, default=5e-4, help="Learning rate.")
parser.add_argument("--adamax", type=int, default=0, help="Adamax v. Adam")

# Log settings
parser.add_argument("--n-checks", type=int, default=50, help="Number of IW=100 checks.")
parser.add_argument("--n-models", type=int, default=1, help="Max number of models to save.")
parser.add_argument("--n-pretrain-epochs", type=int, default=150, help="Number of pretrain epochs (if !NA).")

# Inside a Jupyter/IPython kernel there are no meaningful CLI args, so
# parse an empty list and use sentinel run/seed values instead.
if 'ipykernel' in sys.argv[0]:
    parser.set_defaults(run=999, seed=999)
    args = parser.parse_args([])
else:
    args = parser.parse_args()

# Derive the conditional-input size x_size from the chosen task.
if args.task in {'q2', 'td'}:
    args.x_size = 392
elif args.task == 'q3':
    args.x_size = 588
elif args.task == 'q1':
    args.x_size = 196
else:
    raise Exception('Unrecognized args.task')
# y gets the remaining pixels out of 784 (presumably 28x28 MNIST — confirm).
args.y_size = 784 - args.x_size
|
masayukig/tempest | refs/heads/master | tempest/api/identity/admin/v2/test_roles.py | 3 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
class RolesTestJSON(base.BaseIdentityV2AdminTest):
    """CRUD and user-assignment tests for the Keystone v2 roles admin API."""
    @classmethod
    def resource_setup(cls):
        # Create five shared throw-away roles; each is registered for
        # best-effort deletion (404s ignored) at class teardown.
        super(RolesTestJSON, cls).resource_setup()
        cls.roles = list()
        for _ in range(5):
            role_name = data_utils.rand_name(name='role')
            role = cls.roles_client.create_role(name=role_name)['role']
            cls.addClassResourceCleanup(
                test_utils.call_and_ignore_notfound_exc,
                cls.roles_client.delete_role, role['id'])
            cls.roles.append(role)
    def _get_role_params(self):
        # Build a fresh (user, tenant, role) triple for assignment tests.
        user = self.setup_test_user()
        tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
        role = self.setup_test_role()
        return (user, tenant, role)
    def assert_role_in_role_list(self, role, roles):
        # Fail unless ``role`` (matched by id) appears in ``roles``.
        found = False
        for user_role in roles:
            if user_role['id'] == role['id']:
                found = True
        self.assertTrue(found, "assigned role was not in list")
    @decorators.idempotent_id('75d9593f-50b7-4fcf-bd64-e3fb4a278e23')
    def test_list_roles(self):
        """Return a list of all roles."""
        body = self.roles_client.list_roles()['roles']
        # Every role created in resource_setup must be present in the listing.
        found = [role for role in body if role in self.roles]
        self.assertNotEmpty(found)
        self.assertEqual(len(found), len(self.roles))
    @decorators.idempotent_id('c62d909d-6c21-48c0-ae40-0a0760e6db5e')
    def test_role_create_delete(self):
        """Role should be created, verified, and deleted."""
        role_name = data_utils.rand_name(name='role-test')
        body = self.roles_client.create_role(name=role_name)['role']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.roles_client.delete_role, body['id'])
        self.assertEqual(role_name, body['name'])
        body = self.roles_client.list_roles()['roles']
        found = [role for role in body if role['name'] == role_name]
        self.assertNotEmpty(found)
        body = self.roles_client.delete_role(found[0]['id'])
        # After deletion the role must no longer appear in the listing.
        body = self.roles_client.list_roles()['roles']
        found = [role for role in body if role['name'] == role_name]
        self.assertEmpty(found)
    @decorators.idempotent_id('db6870bd-a6ed-43be-a9b1-2f10a5c9994f')
    def test_get_role_by_id(self):
        """Get a role by its id."""
        role = self.setup_test_role()
        role_id = role['id']
        role_name = role['name']
        body = self.roles_client.show_role(role_id)['role']
        self.assertEqual(role_id, body['id'])
        self.assertEqual(role_name, body['name'])
    @decorators.idempotent_id('0146f675-ffbd-4208-b3a4-60eb628dbc5e')
    def test_assign_user_role(self):
        """Assign a role to a user on a tenant."""
        (user, tenant, role) = self._get_role_params()
        self.roles_client.create_user_role_on_project(tenant['id'],
                                                      user['id'],
                                                      role['id'])
        roles = self.roles_client.list_user_roles_on_project(
            tenant['id'], user['id'])['roles']
        self.assert_role_in_role_list(role, roles)
    @decorators.idempotent_id('f0b9292c-d3ba-4082-aa6c-440489beef69')
    def test_remove_user_role(self):
        """Remove a role assigned to a user on a tenant."""
        (user, tenant, role) = self._get_role_params()
        user_role = self.roles_client.create_user_role_on_project(
            tenant['id'], user['id'], role['id'])['role']
        self.roles_client.delete_role_from_user_on_project(tenant['id'],
                                                           user['id'],
                                                           user_role['id'])
    @decorators.idempotent_id('262e1e3e-ed71-4edd-a0e5-d64e83d66d05')
    def test_list_user_roles(self):
        """List roles assigned to a user on tenant."""
        (user, tenant, role) = self._get_role_params()
        self.roles_client.create_user_role_on_project(tenant['id'],
                                                      user['id'],
                                                      role['id'])
        roles = self.roles_client.list_user_roles_on_project(
            tenant['id'], user['id'])['roles']
        self.assert_role_in_role_list(role, roles)
|
axbaretto/beam | refs/heads/master | sdks/python/apache_beam/utils/interactive_utils.py | 5 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common interactive utility module.
For experimental usage only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
import logging
_LOGGER = logging.getLogger(__name__)
def is_in_ipython():
  """Determines if current code is executed within an ipython session."""
  try:
    from IPython import get_ipython  # pylint: disable=import-error
    # Outside an ipython session get_ipython() returns None.
    return bool(get_ipython())
  except ImportError:
    # If dependencies are not available, then not interactive for sure.
    return False
  except (KeyboardInterrupt, SystemExit):
    raise
  except:  # pylint: disable=bare-except
    _LOGGER.info(
        'Unexpected error occurred, treated as not in IPython.', exc_info=True)
    return False
def is_in_notebook():
  """Determines if current code is executed from an ipython notebook.

  If is_in_notebook() is True, then is_in_ipython() must also be True.
  """
  # Fixed: the original used a local variable that shadowed this function's
  # own name; use early returns instead.
  if not is_in_ipython():
    return False
  # The import and usage must be valid under the execution path; terminal
  # IPython sessions have no 'IPKernelApp' section in their config.
  from IPython import get_ipython
  return 'IPKernelApp' in get_ipython().config
def alter_label_if_ipython(transform, pvalueish):
  """Prefixes the label of the given transform with ipython prompt metadata.

  Only applies when current code runs within an ipython kernel and
  *pvalueish* belongs to a user-defined (watched) pipeline; otherwise this
  is a noop.  The altered label has the shape ``[{prompt}]: {original_label}``.
  """
  if not is_in_ipython():
    return
  from apache_beam.runners.interactive import interactive_environment as ie
  # Tracks user defined pipeline instances in watched scopes so that we only
  # alter labels for transforms applied to those pipeline instances,
  # excluding any pipeline the Beam SDK creates implicitly.
  ie.current_env().track_user_pipelines()
  from IPython import get_ipython
  prompt = get_ipython().execution_count
  owning_pipeline = _extract_pipeline_of_pvalueish(pvalueish)
  # Only alter for transforms applied to user-defined pipelines at
  # pipeline construction time.
  if owning_pipeline and (
      owning_pipeline in ie.current_env().tracked_user_pipelines):
    transform.label = '[{}]: {}'.format(prompt, transform.label)
def _extract_pipeline_of_pvalueish(pvalueish):
"""Extracts the pipeline that the given pvalueish belongs to."""
if isinstance(pvalueish, tuple) and len(pvalueish) > 0:
pvalue = pvalueish[0]
elif isinstance(pvalueish, dict) and len(pvalueish) > 0:
pvalue = next(iter(pvalueish.values()))
else:
pvalue = pvalueish
if hasattr(pvalue, 'pipeline'):
return pvalue.pipeline
return None
|
mumax/1 | refs/heads/release | test/excitation/pointwise.py | 1 | # Micromagnetic standard problem 4
from mumax import *
# NOTE(review): the file header calls this "standard problem 4", but the
# geometry below (500x500x50 nm) does not match that problem; this script
# exercises the pointwise 'field' excitation -- confirm intent.
# material
msat(800e3)      # saturation magnetization (A/m)
aexch(1.3e-11)   # exchange constant (J/m)
alpha(2)         # large damping for quick relaxation
# geometry
nx = 64
ny = 64
gridsize(nx, ny, 1)
partsize(500e-9, 500e-9, 50e-9)
# initial magnetization
uniform(1, 1, 0)
# run
autosave("table", "ascii", 1e-12)
# Toggle a 1 mT x-field on/off at 1 ns intervals via pointwise interpolation.
applypointwise('field', 1e-9, 1e-3, 0, 0)
applypointwise('field', 2e-9, 0e-3, 0, 0)
applypointwise('field', 3e-9, 1e-3, 0, 0)
applypointwise('field', 4e-9, 0e-3, 0, 0)
run(5e-9)
|
Etwigg/Examples | refs/heads/master | Group Project Website/venv/Lib/site-packages/pip/commands/show.py | 344 | from __future__ import absolute_import
from email.parser import FeedParser
import logging
import os
from pip.basecommand import Command
from pip.status_codes import SUCCESS, ERROR
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
logger = logging.getLogger(__name__)
class ShowCommand(Command):
    """Show information about one or more installed packages."""
    name = 'show'
    usage = """
      %prog [options] <package> ..."""
    summary = 'Show information about installed packages.'

    def __init__(self, *args, **kw):
        super(ShowCommand, self).__init__(*args, **kw)
        # Register the single command-specific flag and make the option
        # group visible in ``pip show --help``.
        self.cmd_opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # At least one package name is required.
        if not args:
            logger.warning('ERROR: Please provide a package name or names.')
            return ERROR
        matches = search_packages_info(args)
        printed = print_results(
            matches, list_files=options.files, verbose=options.verbose)
        return SUCCESS if printed else ERROR
def search_packages_info(query):
    """
    Gather details from installed distributions.

    Yields one dict per queried distribution that is actually installed,
    with name, version, location, dependencies and (when available) the
    installed file list and PKG-INFO/METADATA fields.  Installed files
    require a pip generated 'installed-files.txt' in the distribution's
    '.egg-info' directory.
    """
    installed = {}
    for p in pkg_resources.working_set:
        installed[canonicalize_name(p.project_name)] = p

    query_names = [canonicalize_name(name) for name in query]

    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
        package = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        file_list = None
        metadata = None
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadatas
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                # First CSV column of each RECORD row is the file path.
                paths = [line.split(',')[0] for line in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use pip's log for .egg-info's
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        if dist.has_metadata('INSTALLER'):
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    package['installer'] = line.strip()
                    break

        # Fixed: previously a distribution with neither METADATA nor
        # PKG-INFO crashed FeedParser.feed(None) with a TypeError; such
        # packages now simply omit the metadata-derived keys.
        if metadata is not None:
            # @todo: Should pkg_resources.Distribution have a
            # `get_pkg_info` method?
            feed_parser = FeedParser()
            feed_parser.feed(metadata)
            pkg_info_dict = feed_parser.close()
            for key in ('metadata-version', 'summary',
                        'home-page', 'author', 'author-email', 'license'):
                package[key] = pkg_info_dict.get(key)

            # It looks like FeedParser cannot deal with repeated headers
            classifiers = []
            for line in metadata.splitlines():
                if line.startswith('Classifier: '):
                    classifiers.append(line[len('Classifier: '):])
            package['classifiers'] = classifiers

        if file_list:
            package['files'] = sorted(file_list)
        yield package
def print_results(distributions, list_files=False, verbose=False):
    """
    Log one information section per distribution found.

    Returns True if at least one distribution was printed.
    """
    printed_any = False
    for index, info in enumerate(distributions):
        printed_any = True
        # Separate consecutive packages with a divider line.
        if index > 0:
            logger.info("---")
        logger.info("Name: %s", info.get('name', ''))
        logger.info("Version: %s", info.get('version', ''))
        logger.info("Summary: %s", info.get('summary', ''))
        logger.info("Home-page: %s", info.get('home-page', ''))
        logger.info("Author: %s", info.get('author', ''))
        logger.info("Author-email: %s", info.get('author-email', ''))
        logger.info("License: %s", info.get('license', ''))
        logger.info("Location: %s", info.get('location', ''))
        logger.info("Requires: %s", ', '.join(info.get('requires', [])))
        if verbose:
            logger.info("Metadata-Version: %s",
                        info.get('metadata-version', ''))
            logger.info("Installer: %s", info.get('installer', ''))
            logger.info("Classifiers:")
            for classifier in info.get('classifiers', []):
                logger.info("  %s", classifier)
            logger.info("Entry-points:")
            for entry in info.get('entry_points', []):
                logger.info("  %s", entry.strip())
        if list_files:
            logger.info("Files:")
            for entry in info.get('files', []):
                logger.info("  %s", entry.strip())
            if "files" not in info:
                logger.info("Cannot locate installed-files.txt")
    return printed_any
|
Skryptex/SKX-EDIT | refs/heads/master | share/qt/extract_strings_qt.py | 2945 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
# Destination file for the generated Qt string table.
OUT_CPP="src/qt/bitcoinstrings.cpp"
# msgid of the po header entry; matching entries are skipped when emitting.
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples, where each element is the list
    of quoted string fragments making up that field.
    """
    entries = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw in text.split('\n'):
        raw = raw.rstrip('\r')
        if raw.startswith('msgid '):
            # A new msgid closes the previous entry, if one was in progress.
            if reading_str:
                entries.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [raw[6:]]
        elif raw.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [raw[7:]]
        elif raw.startswith('"'):
            # Continuation line belonging to whichever field is open.
            if reading_id:
                current_id.append(raw)
            if reading_str:
                current_str.append(raw)
    # Flush the final entry.
    if reading_str:
        entries.append((current_id, current_str))
    return entries
# Collect every C++ source/header that may contain translatable _("...") strings.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
# NOTE(review): under Python 3 `out` is bytes while parse_po expects str;
# this script presumably targets Python 2 -- confirm before porting.
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
# Sort by msgid for a stable, diff-friendly output order.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    # Skip the po header entry (empty msgid).
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
StratoDem/sd-data-table | refs/heads/master | sd_data_table/__init__.py | 1 | import os as _os
import dash as _dash
import sys as _sys
from .version import __version__
# Directory containing this package (used to locate metadata.json).
_current_path = _os.path.dirname(_os.path.abspath(__file__))
# Generate the Python component classes from the Dash component metadata.
_components = _dash.development.component_loader.load_components(
    _os.path.join(_current_path, 'metadata.json'),
    'sd_data_table'
)
_this_module = _sys.modules[__name__]
# JavaScript bundle: served from the package, or from unpkg pinned to
# this package version when serve_locally is disabled.
_js_dist = [
    {
        "relative_package_path": "bundle.js",
        "external_url": (
            "https://unpkg.com/sd-data-table@{}"
            "/sd_data_table/bundle.js"
        ).format(__version__),
        "namespace": "sd_data_table"
    }
]
_css_dist = []
# Expose every generated component at package level and attach the
# resource manifests Dash expects on each component class.
for _component in _components:
    setattr(_this_module, _component.__name__, _component)
    setattr(_component, '_js_dist', _js_dist)
    setattr(_component, '_css_dist', _css_dist)
|
bentilly/heroes | refs/heads/development | lib/werkzeug/contrib/sessions.py | 295 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. However it's recommended to not use the middleware
but the stores directly in the application. However for very simple
scripts a middleware for sessions could be sufficient.
This module does not implement methods or ways to check if a session is
expired. That should be done by a cronjob and storage specific. For
example to prune unused filesystem sessions one could check the modified
time of the files. It sessions are stored in the database the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookies.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import tempfile
from os import path
from time import time
from random import random
from hashlib import sha1
from pickle import dump, load, HIGHEST_PROTOCOL
from werkzeug.datastructures import CallbackDict
from werkzeug.utils import dump_cookie, parse_cookie
from werkzeug.wsgi import ClosingIterator
from werkzeug.posixemulation import rename
from werkzeug._compat import PY2, text_type
# A valid session id is exactly 40 lowercase hex characters (sha1 hexdigest).
_sha1_re = re.compile(r'^[a-f0-9]{40}$')
def _urandom():
if hasattr(os, 'urandom'):
return os.urandom(30)
return text_type(random()).encode('ascii')
def generate_key(salt=None):
    """Generate a new random session key as a sha1 hexdigest string.

    :param salt: optional extra data mixed into the hash.  ``None`` (the
                 default) mixes in the literal bytes ``b'None'``; text
                 salts are encoded to UTF-8 before hashing.
    """
    if salt is None:
        salt = repr(salt).encode('ascii')
    elif isinstance(salt, text_type):
        # Fixed: a text salt previously reached b''.join() unencoded and
        # crashed with a TypeError on Python 3; encode it so both text
        # and bytes salts work.
        salt = salt.encode('utf-8')
    return sha1(b''.join([
        salt,
        str(time()).encode('ascii'),
        _urandom()
    ])).hexdigest()
class ModificationTrackingDict(CallbackDict):
    """Dict that records in ``modified`` whether it was mutated after
    construction."""
    __slots__ = ('modified',)
    def __init__(self, *args, **kwargs):
        def on_update(self):
            # Invoked by CallbackDict on every mutating operation.
            self.modified = True
        self.modified = False
        CallbackDict.__init__(self, on_update=on_update)
        # Populate via dict.update directly (bypassing CallbackDict) so
        # the initial data does not flip ``modified`` to True.
        dict.update(self, *args, **kwargs)
    def copy(self):
        """Create a flat copy of the dict."""
        # NOTE(review): only the __slots__ attributes are copied onto the
        # fresh instance here; confirm how the mapping contents are meant
        # to be carried over by callers/subclasses.
        missing = object()
        result = object.__new__(self.__class__)
        for name in self.__slots__:
            val = getattr(self, name, missing)
            if val is not missing:
                setattr(result, name, val)
        return result
    def __copy__(self):
        return self.copy()
class Session(ModificationTrackingDict):
    """Subclass of a dict that keeps track of direct object changes.  Changes
    in mutable structures are not tracked, for those you have to set
    `modified` to `True` by hand.
    """
    # sid: the session id string; new: True when the store created this
    # session instead of loading an existing one.
    __slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')
    def __init__(self, data, sid, new=False):
        ModificationTrackingDict.__init__(self, data)
        self.sid = sid
        self.new = new
    def __repr__(self):
        # A trailing '*' marks a session with unsaved modifications.
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )
    @property
    def should_save(self):
        """True if the session should be saved.
        .. versionchanged:: 0.6
           By default the session is now only saved if the session is
           modified, not if it is new like it was before.
        """
        return self.modified
class SessionStore(object):
    """Baseclass for all session stores.

    The Werkzeug contrib module does not implement any useful stores
    besides the filesystem store; application developers are encouraged
    to create their own by overriding :meth:`save`, :meth:`delete` and
    :meth:`get`.

    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    """

    def __init__(self, session_class=None):
        self.session_class = Session if session_class is None else session_class

    def is_valid_key(self, key):
        """Check if a key has the correct format."""
        return _sha1_re.match(key) is not None

    def generate_key(self, salt=None):
        """Simple function that generates a new session key."""
        return generate_key(salt)

    def new(self):
        """Generate a new session."""
        return self.session_class({}, self.generate_key(), True)

    def save(self, session):
        """Save a session."""

    def save_if_modified(self, session):
        """Save if a session class wants an update."""
        if session.should_save:
            self.save(session)

    def delete(self, session):
        """Delete a session."""

    def get(self, sid):
        """Get a session for this sid or a new session object.  This method
        has to check if the session key is valid and create a new session if
        that wasn't the case.
        """
        return self.session_class({}, sid, True)
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = '.__wz_sess'
class FilesystemSessionStore(SessionStore):
    """Simple example session store that saves sessions on the filesystem.
    This store works best on POSIX systems and Windows Vista / Windows
    Server 2008 and newer.
    .. versionchanged:: 0.6
       `renew_missing` was added.  Previously this was considered `True`,
       now the default changed to `False` and it can be explicitly
       deactivated.
    :param path: the path to the folder used for storing the sessions.
                 If not provided the default temporary directory is used.
    :param filename_template: a string template used to give the session
                              a filename.  ``%s`` is replaced with the
                              session id.
    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    :param renew_missing: set to `True` if you want the store to
                          give the user a new sid if the session was
                          not yet saved.
    :param mode: the file mode applied to saved session files.
    """
    def __init__(self, path=None, filename_template='werkzeug_%s.sess',
                 session_class=None, renew_missing=False, mode=0o644):
        SessionStore.__init__(self, session_class)
        if path is None:
            path = tempfile.gettempdir()
        self.path = path
        # On Python 2 filenames are byte strings; encode the template so
        # the '%' substitution below yields a usable path.
        if isinstance(filename_template, text_type) and PY2:
            filename_template = filename_template.encode(
                sys.getfilesystemencoding() or 'utf-8')
        assert not filename_template.endswith(_fs_transaction_suffix), \
            'filename templates may not end with %s' % _fs_transaction_suffix
        self.filename_template = filename_template
        self.renew_missing = renew_missing
        self.mode = mode
    def get_session_filename(self, sid):
        # out of the box, this should be a strict ASCII subset but
        # you might reconfigure the session object to have a more
        # arbitrary string.
        if isinstance(sid, text_type) and PY2:
            sid = sid.encode(sys.getfilesystemencoding() or 'utf-8')
        return path.join(self.path, self.filename_template % sid)
    def save(self, session):
        # Write to a temporary file in the same directory and rename it
        # over the final name so readers never see a partial session file.
        fn = self.get_session_filename(session.sid)
        fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
                                   dir=self.path)
        f = os.fdopen(fd, 'wb')
        try:
            dump(dict(session), f, HIGHEST_PROTOCOL)
        finally:
            f.close()
        try:
            rename(tmp, fn)
            os.chmod(fn, self.mode)
        except (IOError, OSError):
            # Best effort: a failed rename/chmod leaves the previous
            # session (or none) in place rather than raising.
            pass
    def delete(self, session):
        fn = self.get_session_filename(session.sid)
        try:
            os.unlink(fn)
        except OSError:
            # Already gone -- deletion is idempotent.
            pass
    def get(self, sid):
        # Reject malformed sids outright and hand out a fresh session.
        if not self.is_valid_key(sid):
            return self.new()
        try:
            f = open(self.get_session_filename(sid), 'rb')
        except IOError:
            if self.renew_missing:
                return self.new()
            data = {}
        else:
            try:
                try:
                    data = load(f)
                except Exception:
                    # Corrupt or unreadable pickle: fall back to empty data.
                    data = {}
            finally:
                f.close()
        return self.session_class(data, sid, False)
    def list(self):
        """Lists all sessions in the store.
        .. versionadded:: 0.6
        """
        before, after = self.filename_template.split('%s', 1)
        filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
                                                    re.escape(after)))
        result = []
        for filename in os.listdir(self.path):
            #: this is a session that is still being saved.
            if filename.endswith(_fs_transaction_suffix):
                continue
            match = filename_re.match(filename)
            if match is not None:
                result.append(match.group(1))
        return result
class SessionMiddleware(object):
    """A simple middleware that puts the session object of a store provided
    into the WSGI environ.  It automatically sets cookies and restores
    sessions.
    However a middleware is not the preferred solution because it won't be as
    fast as sessions managed by the application itself and will put a key into
    the WSGI environment only relevant for the application which is against
    the concept of WSGI.
    The cookie parameters are the same as for the :func:`~dump_cookie`
    function just prefixed with ``cookie_``.  Additionally `max_age` is
    called `cookie_age` and not `cookie_max_age` because of backwards
    compatibility.
    """
    def __init__(self, app, store, cookie_name='session_id',
                 cookie_age=None, cookie_expires=None, cookie_path='/',
                 cookie_domain=None, cookie_secure=None,
                 cookie_httponly=False, environ_key='werkzeug.session'):
        self.app = app
        self.store = store
        self.cookie_name = cookie_name
        self.cookie_age = cookie_age
        self.cookie_expires = cookie_expires
        self.cookie_path = cookie_path
        self.cookie_domain = cookie_domain
        self.cookie_secure = cookie_secure
        self.cookie_httponly = cookie_httponly
        self.environ_key = environ_key
    def __call__(self, environ, start_response):
        # Restore the session named by the request cookie, or create a
        # fresh one when no cookie is present (invalid sids are handled
        # inside the store's get()).
        cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
        sid = cookie.get(self.cookie_name, None)
        if sid is None:
            session = self.store.new()
        else:
            session = self.store.get(sid)
        environ[self.environ_key] = session
        def injecting_start_response(status, headers, exc_info=None):
            # Persist the session and attach Set-Cookie just before the
            # response status/headers go out.
            if session.should_save:
                self.store.save(session)
                headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
                                                          session.sid, self.cookie_age,
                                                          self.cookie_expires, self.cookie_path,
                                                          self.cookie_domain, self.cookie_secure,
                                                          self.cookie_httponly)))
            return start_response(status, headers, exc_info)
        # ClosingIterator also saves a modified session once the response
        # iterable is exhausted/closed.
        return ClosingIterator(self.app(environ, injecting_start_response),
                               lambda: self.store.save_if_modified(session))
|
reddraggone9/youtube-dl | refs/heads/master | youtube_dl/extractor/ruhd.py | 149 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
class RUHDIE(InfoExtractor):
    """Extractor for ruhd.ru player pages (play.php?vid=<id>)."""
    # Example: http://www.ruhd.ru/play.php?vid=207 -- the numeric vid is
    # captured as the video id.
    _VALID_URL = r'http://(?:www\.)?ruhd\.ru/play\.php\?vid=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.ruhd.ru/play.php?vid=207',
        'md5': 'd1a9ec4edf8598e3fbd92bb16072ba83',
        'info_dict': {
            'id': '207',
            'ext': 'divx',
            'title': 'КОТ бааааам',
            'description': 'классный кот)',
            'thumbnail': 're:^http://.*\.jpg$',
        }
    }
    def _real_extract(self, url):
        # Download the page and pull the stream URL / preview image from
        # the <param> tags of the embedded player object.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_url = self._html_search_regex(
            r'<param name="src" value="([^"]+)"', webpage, 'video url')
        title = self._html_search_regex(
            r'<title>([^<]+) RUHD.ru - Видео Высокого качества №1 в России!</title>',
            webpage, 'title')
        description = self._html_search_regex(
            r'(?s)<div id="longdesc">(.+?)<span id="showlink">',
            webpage, 'description', fatal=False)
        thumbnail = self._html_search_regex(
            r'<param name="previewImage" value="([^"]+)"',
            webpage, 'thumbnail', fatal=False)
        if thumbnail:
            # The page only carries a site-relative path.
            thumbnail = 'http://www.ruhd.ru' + thumbnail
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
|
MoritzS/django | refs/heads/master | tests/utils_tests/test_termcolors.py | 134 | import unittest
from django.utils.termcolors import (
DARK_PALETTE, DEFAULT_PALETTE, LIGHT_PALETTE, NOCOLOR_PALETTE, PALETTES,
colorize, parse_color_setting,
)
class TermColorTests(unittest.TestCase):
    """Tests for parse_color_setting() and colorize() from
    django.utils.termcolors."""

    def test_empty_string(self):
        self.assertEqual(parse_color_setting(''), PALETTES[DEFAULT_PALETTE])

    def test_simple_palette(self):
        self.assertEqual(parse_color_setting('light'), PALETTES[LIGHT_PALETTE])
        self.assertEqual(parse_color_setting('dark'), PALETTES[DARK_PALETTE])
        self.assertIsNone(parse_color_setting('nocolor'))

    def test_fg(self):
        self.assertEqual(
            parse_color_setting('error=green'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )

    def test_fg_bg(self):
        self.assertEqual(
            parse_color_setting('error=green/blue'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'})
        )

    def test_fg_opts(self):
        self.assertEqual(
            parse_color_setting('error=green,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )
        self.assertEqual(
            parse_color_setting('error=green,bold,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink', 'bold')})
        )

    def test_fg_bg_opts(self):
        self.assertEqual(
            parse_color_setting('error=green/blue,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)})
        )
        self.assertEqual(
            parse_color_setting('error=green/blue,bold,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink', 'bold')})
        )

    def test_override_palette(self):
        self.assertEqual(
            parse_color_setting('light;error=green'),
            dict(PALETTES[LIGHT_PALETTE], ERROR={'fg': 'green'})
        )

    def test_override_nocolor(self):
        self.assertEqual(
            parse_color_setting('nocolor;error=green'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )

    def test_reverse_override(self):
        # A later palette name wins over earlier role definitions.
        self.assertEqual(parse_color_setting('error=green;light'), PALETTES[LIGHT_PALETTE])

    def test_multiple_roles(self):
        self.assertEqual(
            parse_color_setting('error=green;sql_field=blue'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'})
        )

    def test_override_with_multiple_roles(self):
        self.assertEqual(
            parse_color_setting('light;error=green;sql_field=blue'),
            dict(PALETTES[LIGHT_PALETTE], ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'})
        )

    def test_empty_definition(self):
        self.assertIsNone(parse_color_setting(';'))
        self.assertEqual(parse_color_setting('light;'), PALETTES[LIGHT_PALETTE])
        self.assertIsNone(parse_color_setting(';;;'))

    def test_empty_options(self):
        self.assertEqual(
            parse_color_setting('error=green,'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=green,,,'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=green,,blink,,'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )

    def test_bad_palette(self):
        self.assertIsNone(parse_color_setting('unknown'))

    def test_bad_role(self):
        # Unknown roles are ignored; valid ones in the same setting survive.
        self.assertIsNone(parse_color_setting('unknown='))
        self.assertIsNone(parse_color_setting('unknown=green'))
        self.assertEqual(
            parse_color_setting('unknown=green;sql_field=blue'),
            dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'})
        )

    def test_bad_color(self):
        # Unknown colors/backgrounds are dropped without invalidating the rest.
        self.assertIsNone(parse_color_setting('error='))
        self.assertEqual(
            parse_color_setting('error=;sql_field=blue'),
            dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'})
        )
        self.assertIsNone(parse_color_setting('error=unknown'))
        self.assertEqual(
            parse_color_setting('error=unknown;sql_field=blue'),
            dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'})
        )
        self.assertEqual(
            parse_color_setting('error=green/unknown'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=green/blue/something'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'})
        )
        self.assertEqual(
            parse_color_setting('error=green/blue/something,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)})
        )

    def test_bad_option(self):
        self.assertEqual(
            parse_color_setting('error=green,unknown'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=green,unknown,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )

    def test_role_case(self):
        self.assertEqual(
            parse_color_setting('ERROR=green'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('eRrOr=green'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )

    def test_color_case(self):
        self.assertEqual(
            parse_color_setting('error=GREEN'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=GREEN/BLUE'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'})
        )
        self.assertEqual(
            parse_color_setting('error=gReEn'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=gReEn/bLuE'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'})
        )

    def test_opts_case(self):
        self.assertEqual(
            parse_color_setting('error=green,BLINK'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )
        self.assertEqual(
            parse_color_setting('error=green,bLiNk'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )

    def test_colorize_empty_text(self):
        self.assertEqual(colorize(text=None), '\x1b[m\x1b[0m')
        self.assertEqual(colorize(text=''), '\x1b[m\x1b[0m')
        # Fixed: ('noreset') is just a parenthesized string, not a tuple --
        # it only worked by the accident that 'noreset' in 'noreset' is True.
        # Pass a real one-element tuple as colorize() expects.
        self.assertEqual(colorize(text=None, opts=('noreset',)), '\x1b[m')
        self.assertEqual(colorize(text='', opts=('noreset',)), '\x1b[m')
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-2.6/Lib/plat-os2emx/SOCKET.py | 134 | # Generated by h2py from f:/emx/include/sys/socket.h
# Included from sys/types.h
FD_SETSIZE = 256
# Included from sys/uio.h
FREAD = 1
FWRITE = 2
# Socket types (second argument to socket()).
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
# Socket-level options, used with get/setsockopt at level SOL_SOCKET.
SO_DEBUG = 0x0001
SO_ACCEPTCONN = 0x0002
SO_REUSEADDR = 0x0004
SO_KEEPALIVE = 0x0008
SO_DONTROUTE = 0x0010
SO_BROADCAST = 0x0020
SO_USELOOPBACK = 0x0040
SO_LINGER = 0x0080
SO_OOBINLINE = 0x0100
SO_L_BROADCAST = 0x0200
SO_RCV_SHUTDOWN = 0x0400
SO_SND_SHUTDOWN = 0x0800
SO_SNDBUF = 0x1001
SO_RCVBUF = 0x1002
SO_SNDLOWAT = 0x1003
SO_RCVLOWAT = 0x1004
SO_SNDTIMEO = 0x1005
SO_RCVTIMEO = 0x1006
SO_ERROR = 0x1007
SO_TYPE = 0x1008
SO_OPTIONS = 0x1010
SOL_SOCKET = 0xffff
# Address families (first argument to socket()).
AF_UNSPEC = 0
AF_UNIX = 1
AF_INET = 2
AF_IMPLINK = 3
AF_PUP = 4
AF_CHAOS = 5
AF_NS = 6
AF_NBS = 7
AF_ISO = 7
AF_OSI = AF_ISO
AF_ECMA = 8
AF_DATAKIT = 9
AF_CCITT = 10
AF_SNA = 11
AF_DECnet = 12
AF_DLI = 13
AF_LAT = 14
AF_HYLINK = 15
AF_APPLETALK = 16
AF_NB = 17
AF_NETBIOS = AF_NB
AF_OS2 = AF_UNIX
AF_MAX = 18
# Protocol families mirror the address families one-for-one.
PF_UNSPEC = AF_UNSPEC
PF_UNIX = AF_UNIX
PF_INET = AF_INET
PF_IMPLINK = AF_IMPLINK
PF_PUP = AF_PUP
PF_CHAOS = AF_CHAOS
PF_NS = AF_NS
PF_NBS = AF_NBS
PF_ISO = AF_ISO
PF_OSI = AF_ISO
PF_ECMA = AF_ECMA
PF_DATAKIT = AF_DATAKIT
PF_CCITT = AF_CCITT
PF_SNA = AF_SNA
PF_DECnet = AF_DECnet
PF_DLI = AF_DLI
PF_LAT = AF_LAT
PF_HYLINK = AF_HYLINK
PF_APPLETALK = AF_APPLETALK
PF_NB = AF_NB
PF_NETBIOS = AF_NB
PF_OS2 = AF_UNIX
PF_MAX = AF_MAX
SOMAXCONN = 5
# Message flags for send()/recv().
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_DONTROUTE = 0x4
MSG_EOR = 0x8
MSG_TRUNC = 0x10
MSG_CTRUNC = 0x20
MSG_WAITALL = 0x40
MSG_MAXIOVLEN = 16
SCM_RIGHTS = 0x01
# mbuf type codes.
MT_FREE = 0
MT_DATA = 1
MT_HEADER = 2
MT_SOCKET = 3
MT_PCB = 4
MT_RTABLE = 5
MT_HTABLE = 6
MT_ATABLE = 7
MT_SONAME = 8
MT_ZOMBIE = 9
MT_SOOPTS = 10
MT_FTABLE = 11
MT_RIGHTS = 12
MT_IFADDR = 13
MAXSOCKETS = 2048
|
youdonghai/intellij-community | refs/heads/master | python/testData/inspections/PyMethodMayBeStaticInspection/abc.py | 488 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
# Instance of old-style class: on Python 2, type() of an old-style
# instance is types.InstanceType rather than its class, so capture it
# here for the special-case in ABCMeta.__instancecheck__ below.
class _C: pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
    """Mark *funcobj* as abstract and return it unchanged otherwise.
    Only meaningful on methods of classes whose metaclass is ABCMeta
    (or derived from it): such a class cannot be instantiated until
    every abstract method is overridden.  The abstract implementation
    itself stays callable through the normal 'super' mechanisms.
    Usage:
        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    # ABCMeta.__new__ looks for this flag when collecting abstract names.
    funcobj.__isabstractmethod__ = True
    return funcobj
class abstractproperty(property):
    """A property subclass marking the property as abstract.
    Classes whose metaclass derives from ABCMeta cannot be
    instantiated until every abstract property is overridden.
    Read-only usage:
        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...
    A read/write abstract property uses the long form instead:
        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """
    # The flag ABCMeta.__new__ looks for when collecting abstract names.
    __isabstractmethod__ = True
class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).
    Use this metaclass to create an ABC. An ABC can be subclassed
    directly, and then acts as a mix-in class. You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).
    """
    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything. It forces the
    # negative cache to be cleared before its next use.
    _abc_invalidation_counter = 0
    def __new__(mcls, name, bases, namespace):
        # Build the class normally, then collect its abstract names.
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names
        abstracts = set(name
                        for name, value in namespace.items()
                        if getattr(value, "__isabstractmethod__", False))
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                # An inherited abstract name stays abstract unless this
                # class (or a later base in the MRO) overrides it with a
                # concrete implementation.
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls
    def register(cls, subclass):
        """Register a virtual subclass of an ABC."""
        if not isinstance(subclass, (type, types.ClassType)):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
        print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print >> file, "%s: %r" % (name, value)
    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking when it's simple.
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subtype in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subtype)
        # Class and type differ (e.g. instance lied via __class__):
        # accept if either passes the subclass check.
        return (cls.__subclasscheck__(subclass) or
                cls.__subclasscheck__(subtype))
    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
|
ondras/TeaJS | refs/heads/master | deps/v8/build/gyp/test/include_dirs/gyptest-all.py | 261 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies use of include_dirs when using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('includes.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('includes.gyp', test.ALL, chdir='relocate/src')
# The program's output proves each include directory was searched in order.
expect = """\
Hello from includes.c
Hello from inc.h
Hello from include1.h
Hello from subdir/inc2/include2.h
Hello from shadow2/shadow.h
"""
test.run_built_executable('includes', stdout=expect, chdir='relocate/src')
# Xcode places the subdir target's build products under the subdir itself.
chdir = 'relocate/src/subdir' if test.format == 'xcode' else 'relocate/src'
expect = """\
Hello from subdir/subdir_includes.c
Hello from subdir/inc.h
Hello from include1.h
Hello from subdir/inc2/include2.h
"""
test.run_built_executable('subdir_includes', stdout=expect, chdir=chdir)
test.pass_test()
|
YongseopKim/crosswalk-test-suite | refs/heads/master | webapi/webapi-nfc-w3c-tests/inst.wgt.py | 294 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory containing this script; the package name is derived from it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Populated by main() with the parsed command-line options.
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Device-side staging paths; filled in by main() once the user is known.
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, echoing output live; return (exit_code, lines)."""
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() stays None while the child runs; an empty line together
        # with a real exit code means the stream is drained and it exited.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations in 'su' so they run as the target user."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Resolve the numeric uid of PARAMETERS.user on the target device."""
    if PARAMETERS.mode == "SDB":
        template = "sdb -s %s shell id -u %s"
    else:
        template = "ssh %s \"id -u %s\""
    return doCMD(template % (PARAMETERS.device, PARAMETERS.user))
def getPKGID(pkg_name=None):
    """Look up the package id of *pkg_name* via 'pkgcmd -l'; None if absent."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    for line in output:
        if line.find("[" + pkg_name + "]") == -1:
            continue
        # The token following the literal 'pkgid' is '[<id>]'.
        fields = line.split()
        return fields[fields.index("pkgid") + 1].strip("[]")
    return None
def doRemoteCMD(cmd=None):
    """Run *cmd* on the device via sdb or ssh; return (exit_code, lines)."""
    if PARAMETERS.mode == "SDB":
        full_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        full_cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(full_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the device; return True on success.

    Bug fix: the original returned True when the copy command exited
    non-zero (i.e. on failure).  Every caller in this script treats a
    False result as failure (``if not doRemoteCopy(...)``), so failed
    pushes were silently accepted and successful ones flagged as errors.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s push %s %s" % (PARAMETERS.device, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush device filesystem buffers before relying on the copied file.
    doRemoteCMD("sync")
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt package found beside this script; True on success."""
    ok = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        if root.endswith("mediasrc"):
            continue
        for name in files:
            if not name.endswith(".wgt"):
                continue
            pkg_id = getPKGID(os.path.basename(os.path.splitext(name)[0]))
            if not pkg_id:
                ok = False
                continue
            (return_code, output) = doRemoteCMD(
                "pkgcmd -u -t wgt -q -n %s" % pkg_id)
            for line in output:
                if "Failure" in line:
                    ok = False
                    break
    # Remove the staged package sources from the device as well.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        ok = False
    return ok
def instPKGs():
    """Push and install every .wgt found beside this script; True on success."""
    ok = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        ok = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        if root.endswith("mediasrc"):
            continue
        for name in files:
            if not name.endswith(".wgt"):
                continue
            if not doRemoteCopy(os.path.join(root, name),
                                "%s/%s" % (SRC_DIR, name)):
                ok = False
            (return_code, output) = doRemoteCMD(
                "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, name))
            # The staged .wgt is no longer needed once installed.
            doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, name))
            for line in output:
                if "Failure" in line:
                    ok = False
                    break
    # Template kept for suites that need extra fixture copying:
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''
    return ok
def main():
    """Parse command-line options, then install or uninstall the test wgts."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # Auto-detect the first attached sdb device when none was given.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        # Build the per-user DBus session address needed by pkgcmd.
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    main()
    sys.exit(0)
|
qbit/node-gyp | refs/heads/master | gyp/pylib/gyp/generator/ninja_test.py | 260 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
  """Checks platform-specific output-file naming in ninja.NinjaWriter.
  The test methods are defined conditionally so that each platform only
  runs the cases that apply to it.
  """
  if sys.platform in ('win32', 'cygwin'):
    def test_BinaryNamesWindows(self):
      writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'ninja.build', 'win')
      spec = { 'target_name': 'wee' }
      name_for = lambda kind: writer.ComputeOutputFileName(spec, kind)
      self.assertTrue(name_for('executable').endswith('.exe'))
      self.assertTrue(name_for('shared_library').endswith('.dll'))
      self.assertTrue(name_for('static_library').endswith('.lib'))
  if sys.platform == 'linux2':
    def test_BinaryNamesLinux(self):
      writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'ninja.build', 'linux')
      spec = { 'target_name': 'wee' }
      name_for = lambda kind: writer.ComputeOutputFileName(spec, kind)
      self.assertTrue('.' not in name_for('executable'))
      self.assertTrue(name_for('shared_library').startswith('lib'))
      self.assertTrue(name_for('static_library').startswith('lib'))
      self.assertTrue(name_for('shared_library').endswith('.so'))
      self.assertTrue(name_for('static_library').endswith('.a'))
# Run the platform-appropriate tests when executed directly.
if __name__ == '__main__':
  unittest.main()
|
root-mirror/root | refs/heads/master | tutorials/roofit/rf707_kernelestimation.py | 11 | ## \file
## \ingroup tutorial_roofit
## \notebook
## Special pdf's: using non-parametric (multi-dimensional) kernel estimation pdfs
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Create low stats 1D dataset
# -------------------------------------------------------
# Create a toy pdf for sampling
x = ROOT.RooRealVar("x", "x", 0, 20)
p = ROOT.RooPolynomial("p", "p", x, ROOT.RooArgList(ROOT.RooFit.RooConst(
    0.01), ROOT.RooFit.RooConst(-0.01), ROOT.RooFit.RooConst(0.0004)))
# Sample 200 events from p
data1 = p.generate(ROOT.RooArgSet(x), 200)
# Create 1D kernel estimation pdf
# ---------------------------------------------------------------
# Create adaptive kernel estimation pdf. In this configuration the input data
# is mirrored over the boundaries to minimize edge effects in distributions
# that do not fall to zero towards the edges
kest1 = ROOT.RooKeysPdf("kest1", "kest1", x, data1,
                        ROOT.RooKeysPdf.MirrorBoth)
# An adaptive kernel estimation pdf on the same data without mirroring option
# for comparison
kest2 = ROOT.RooKeysPdf("kest2", "kest2", x, data1,
                        ROOT.RooKeysPdf.NoMirror)
# Adaptive kernel estimation pdf with increased bandwidth scale factor
# (promotes smoothness over detail preservation).
# Fix: named "kest3" here; the original reused the name "kest1", which
# collides with the pdf created above in ROOT's object bookkeeping.
kest3 = ROOT.RooKeysPdf("kest3", "kest3", x, data1,
                        ROOT.RooKeysPdf.MirrorBoth, 2)
# Plot kernel estimation pdfs with and without mirroring over data
frame = x.frame(
    ROOT.RooFit.Title("Adaptive kernel estimation pdf with and w/o mirroring"),
    ROOT.RooFit.Bins(20))
data1.plotOn(frame)
kest1.plotOn(frame)
kest2.plotOn(frame, ROOT.RooFit.LineStyle(
    ROOT.kDashed), ROOT.RooFit.LineColor(ROOT.kRed))
# Plot kernel estimation pdfs with regular and increased bandwidth
frame2 = x.frame(ROOT.RooFit.Title(
    "Adaptive kernel estimation pdf with regular, bandwidth"))
kest1.plotOn(frame2)
kest3.plotOn(frame2, ROOT.RooFit.LineColor(ROOT.kMagenta))
# Create low stats 2D dataset
# -------------------------------------------------------
# Construct a 2D toy pdf for sampling
y = ROOT.RooRealVar("y", "y", 0, 20)
py = ROOT.RooPolynomial("py", "py", y, ROOT.RooArgList(ROOT.RooFit.RooConst(
    0.01), ROOT.RooFit.RooConst(0.01), ROOT.RooFit.RooConst(-0.0004)))
pxy = ROOT.RooProdPdf("pxy", "pxy", ROOT.RooArgList(p, py))
data2 = pxy.generate(ROOT.RooArgSet(x, y), 1000)
# Create 2D kernel estimation pdf
# ---------------------------------------------------------------
# Create 2D adaptive kernel estimation pdf with mirroring
kest4 = ROOT.RooNDKeysPdf("kest4", "kest4", ROOT.RooArgList(x, y), data2, "am")
# Create 2D adaptive kernel estimation pdf with mirroring and double
# bandwidth
kest5 = ROOT.RooNDKeysPdf(
    "kest5", "kest5", ROOT.RooArgList(
        x, y), data2, "am", 2)
# Create a histogram of the data
hh_data = ROOT.RooAbsData.createHistogram(
    data2, "hh_data", x, ROOT.RooFit.Binning(10), ROOT.RooFit.YVar(
        y, ROOT.RooFit.Binning(10)))
# Create histogram of the 2d kernel estimation pdfs
hh_pdf = kest4.createHistogram("hh_pdf", x, ROOT.RooFit.Binning(
    25), ROOT.RooFit.YVar(y, ROOT.RooFit.Binning(25)))
hh_pdf2 = kest5.createHistogram("hh_pdf2", x, ROOT.RooFit.Binning(
    25), ROOT.RooFit.YVar(y, ROOT.RooFit.Binning(25)))
hh_pdf.SetLineColor(ROOT.kBlue)
hh_pdf2.SetLineColor(ROOT.kMagenta)
c = ROOT.TCanvas("rf707_kernelestimation",
                 "rf707_kernelestimation", 800, 800)
c.Divide(2, 2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
frame.GetYaxis().SetTitleOffset(1.4)
frame.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
frame2.GetYaxis().SetTitleOffset(1.8)
frame2.Draw()
c.cd(3)
ROOT.gPad.SetLeftMargin(0.15)
hh_data.GetZaxis().SetTitleOffset(1.4)
hh_data.Draw("lego")
c.cd(4)
ROOT.gPad.SetLeftMargin(0.20)
hh_pdf.GetZaxis().SetTitleOffset(2.4)
hh_pdf.Draw("surf")
hh_pdf2.Draw("surfsame")
c.SaveAs("rf707_kernelestimation.png")
|
fx2003/tensorflow-study | refs/heads/master | TensorFlow实战/4-2multilayer_perceptron.py | 1 | from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Train a one-hidden-layer MLP (784-300-10) on MNIST with dropout
# regularization, then evaluate accuracy on the test set.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()
num_inputs = 784
num_hidden = 300
# Hidden-layer weights: small random init breaks symmetry for ReLU units;
# the output layer may start at zero since softmax gradients still flow.
W_hidden = tf.Variable(tf.truncated_normal([num_inputs, num_hidden], stddev=0.1))
b_hidden = tf.Variable(tf.zeros([num_hidden]))
W_out = tf.Variable(tf.zeros([num_hidden, 10]))
b_out = tf.Variable(tf.zeros([10]))
x = tf.placeholder(tf.float32, [None, num_inputs])
keep_prob = tf.placeholder(tf.float32)
hidden = tf.nn.relu(tf.matmul(x, W_hidden) + b_hidden)
hidden_dropped = tf.nn.dropout(hidden, keep_prob)
y = tf.nn.softmax(tf.matmul(hidden_dropped, W_out) + b_out)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)
tf.global_variables_initializer().run()
for step in range(3000):
    images, labels = mnist.train.next_batch(100)
    # Keep 75% of hidden activations during training.
    train_step.run({x: images, y_: labels, keep_prob: 0.75})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) |
fidomason/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py | 3126 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
    """Base class for charset probers.
    Concrete probers feed() byte buffers and report a detection state plus
    a confidence score; this base supplies the shared plumbing and a few
    byte-filtering helpers used by subclasses.
    """
    def __init__(self):
        pass
    def reset(self):
        # Return to the initial "still detecting" state.
        self._mState = constants.eDetecting
    def get_charset_name(self):
        # Subclasses return the name of the charset they probe for.
        return None
    def feed(self, aBuf):
        pass
    def get_state(self):
        return self._mState
    def get_confidence(self):
        return 0.0
    def filter_high_bit_only(self, aBuf):
        # Collapse each run of 7-bit bytes to a single space, leaving only
        # the high-bit bytes significant.
        return re.sub(b'([\x00-\x7F])+', b' ', aBuf)
    def filter_without_english_letters(self, aBuf):
        # Collapse each run of ASCII letters to a single space.
        return re.sub(b'([A-Za-z])+', b' ', aBuf)
    def filter_with_english_letters(self, aBuf):
        # TODO
        return aBuf
|
adit-chandra/tensorflow | refs/heads/master | tensorflow/python/util/deprecation.py | 12 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
# Allow deprecation warnings to be silenced temporarily with a context manager.
_PRINT_DEPRECATION_WARNINGS = True
# Remember which deprecation warnings have been printed already.
# Maps decorated callable -> True once its one-time warning has fired.
_PRINTED_WARNING = {}
class DeprecatedNamesAlreadySet(Exception):
  """Signals a second attempt to set deprecated names on one symbol."""
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
  """Prepend a deprecation notice to a deprecated function's docstring."""
  when = 'in a future version' if date is None else ('after %s' % date)
  main_text = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' % when]
  if instructions:
    main_text.append('Instructions for updating:')
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION', '(deprecated)', main_text)
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions,
                                            deprecated_names):
  """Prepend a notice about deprecated arguments to a docstring."""
  names = ', '.join(sorted(deprecated_names))
  when = 'in a future version' if date is None else ('after %s' % date)
  notice = [
      'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. '
      'They will be removed %s.' % (names, when),
      'Instructions for updating:',
  ]
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION ARGUMENTS',
      '(deprecated arguments)', notice)
def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions,
                                                  deprecated_name_value_dict):
  """Prepend a notice about deprecated argument *values* to a docstring."""
  pairs = ', '.join(
      '%s=%r' % (key, value)
      for key, value in sorted(deprecated_name_value_dict.items()))
  when = 'in a future version' if date is None else ('after %s' % date)
  notice = [
      'SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. '
      'They will be removed %s.' % (pairs, when),
      'Instructions for updating:',
  ]
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION ARGUMENT VALUES',
      '(deprecated argument values)', notice)
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError('Date must be YYYY-MM-DD.')
if not instructions:
raise ValueError('Don\'t deprecate things without conversion instructions!')
def _call_location(outer=False):
  """Return 'file:line' of the caller (or the caller's caller if *outer*)."""
  stack = tf_stack.extract_stack(limit=4)
  depth = len(stack)
  if not depth:  # should never happen as we're in a function
    return 'UNKNOWN'
  # Walk one extra frame up when the outer call site is requested.
  index = max(0, depth - 4 if outer else depth - 3)
  frame = stack[index]
  return '{}:{}'.format(frame.filename, frame.lineno)
def _wrap_decorator(wrapped_function):
  """Return a decorator factory recording *wrapped_function* as wrapped.

  Uses tf_decorator.make_decorator rather than functools.wraps so that
  doc-generation scripts can recover the original function signature,
  which functools.wraps does not preserve on Python 2.

  Args:
    wrapped_function: The function that the decorated function wraps.

  Returns:
    A function mapping a wrapper function to a TFDecorator instance.
  """
  def _decorate(wrapper_func):
    return tf_decorator.make_decorator(wrapped_function, wrapper_func)
  return _decorate
def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
  """Deprecate a symbol in favor of a new name with identical semantics.
  This function is meant to be used when defining a backwards-compatibility
  alias for a symbol which has been moved. For example:
  module1.py:
  ```python
  class NewNameForClass: pass
  ```
  module2.py:
  ```python
  import module1
  DeprecatedNameForClass = deprecated_alias(
    deprecated_name='module2.DeprecatedNameForClass',
    name='module1.NewNameForClass',
    func_or_class=module1.NewNameForClass)
  ```
  This function works for classes and functions.
  For classes, it creates a new class which is functionally identical (it
  inherits from the original, and overrides its constructor), but which prints
  a deprecation warning when an instance is created. It also adds a deprecation
  notice to the class' docstring.
  For functions, it returns a function wrapped by `tf_decorator.make_decorator`.
  That function prints a warning when used, and has a deprecation notice in its
  docstring. This is more or less equivalent (the deprecation warning has
  slightly different text) to writing:
  ```python
  @deprecated
  def deprecated_alias(original_args):
    real_function(original_args)
  ```
  Args:
    deprecated_name: The name of the symbol that is being deprecated, to be used
      in the warning message. This should be its fully qualified name to avoid
      confusion.
    name: The name of the symbol that is to be used instead of the deprecated
      name. This should be a fully qualified name to avoid confusion.
    func_or_class: The (non-deprecated) class or function for which a deprecated
      alias should be created.
    warn_once: If True (the default), only print a deprecation warning the first
      time this function is used, or the class is instantiated.
  Returns:
    A wrapped version of `func_or_class` which prints a deprecation warning on
    use and has a modified docstring.
  """
  if tf_inspect.isclass(func_or_class):
    # Make a new class with __init__ wrapped in a warning.
    class _NewClass(func_or_class):  # pylint: disable=missing-docstring
      __doc__ = decorator_utils.add_notice_to_docstring(
          func_or_class.__doc__, 'Please use %s instead.' % name,
          'DEPRECATED CLASS',
          '(deprecated)', ['THIS CLASS IS DEPRECATED. '
                           'It will be removed in a future version. '])
      __name__ = func_or_class.__name__
      # NOTE(review): __module__ is set to the alias's creation site as a
      # 'file:line' string, not a real module name — confirm downstream
      # tooling expects this.
      __module__ = _call_location(outer=True)
      @_wrap_decorator(func_or_class.__init__)
      def __init__(self, *args, **kwargs):
        if hasattr(_NewClass.__init__, '__func__'):
          # Python 2
          _NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__
        else:
          # Python 3
          _NewClass.__init__.__doc__ = func_or_class.__init__.__doc__
        if _PRINT_DEPRECATION_WARNINGS:
          # We're making the alias as we speak. The original may have other
          # aliases, so we cannot use it to check for whether it's already been
          # warned about.
          if _NewClass.__init__ not in _PRINTED_WARNING:
            if warn_once:
              _PRINTED_WARNING[_NewClass.__init__] = True
            logging.warning(
                'From %s: The name %s is deprecated. Please use %s instead.\n',
                _call_location(), deprecated_name, name)
        super(_NewClass, self).__init__(*args, **kwargs)
    return _NewClass
  else:
    decorator_utils.validate_callable(func_or_class, 'deprecated')
    # Make a wrapper for the original
    @functools.wraps(func_or_class)
    def new_func(*args, **kwargs):  # pylint: disable=missing-docstring
      if _PRINT_DEPRECATION_WARNINGS:
        # We're making the alias as we speak. The original may have other
        # aliases, so we cannot use it to check for whether it's already been
        # warned about.
        if new_func not in _PRINTED_WARNING:
          if warn_once:
            _PRINTED_WARNING[new_func] = True
          logging.warning(
              'From %s: The name %s is deprecated. Please use %s instead.\n',
              _call_location(), deprecated_name, name)
      return func_or_class(*args, **kwargs)
    return tf_decorator.make_decorator(
        func_or_class, new_func, 'deprecated',
        _add_deprecated_function_notice_to_docstring(
            func_or_class.__doc__, None, 'Please use %s instead.' % name))
def deprecated_endpoints(*args):
  """Decorator marking API endpoint names as deprecated.

  No deprecation messages are printed; the decorator only annotates the
  symbol with _tf_deprecated_api_names so tooling can pick the names up.
  TODO(annarev): eventually start printing deprecation warnings when
  @deprecation_endpoints decorator is added.

  Args:
    *args: Deprecated endpoint names.

  Returns:
    A function that takes a symbol and records *args* on it as
    _tf_deprecated_api_names.
  """
  def _mark(func):
    # pylint: disable=protected-access
    if '_tf_deprecated_api_names' in func.__dict__:
      raise DeprecatedNamesAlreadySet(
          'Cannot set deprecated names for %s to %s. '
          'Deprecated names are already set to %s.' % (
              func.__name__, str(args), str(func._tf_deprecated_api_names)))
    func._tf_deprecated_api_names = args
    # pylint: enable=protected-access
    return func
  return _mark
def deprecated(date, instructions, warn_once=True):
  """Decorator for marking functions or methods deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called. It has the following format:

    <function> (from <module>) is deprecated and will be removed after <date>.
    Instructions for updating:
    <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> will include the class name if it is a method.

  It also edits the docstring of the function: ' (deprecated)' is appended
  to the first line of the docstring and a deprecation notice is prepended
  to the rest of the docstring.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
    instructions: String. Instructions on how to update code using the
      deprecated function.
    warn_once: Boolean. Set to `True` to warn only the first time the decorated
      function is called. Otherwise, every call will log a warning.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, or instructions are
      empty.
  """
  _validate_deprecation_args(date, instructions)

  def deprecated_wrapper(func):
    """Deprecation wrapper."""
    decorator_utils.validate_callable(func, 'deprecated')
    @functools.wraps(func)
    def new_func(*args, **kwargs):  # pylint: disable=missing-docstring
      if _PRINT_DEPRECATION_WARNINGS:
        # Only log the first call per function when warn_once is set; the
        # module-level _PRINTED_WARNING dict tracks what was already logged.
        if func not in _PRINTED_WARNING:
          if warn_once:
            _PRINTED_WARNING[func] = True
          logging.warning(
              'From %s: %s (from %s) is deprecated and will be removed %s.\n'
              'Instructions for updating:\n%s',
              _call_location(), decorator_utils.get_qualified_name(func),
              func.__module__,
              'in a future version' if date is None else ('after %s' % date),
              instructions)
      return func(*args, **kwargs)
    # Preserve TF decorator metadata and splice the deprecation notice into
    # the wrapped function's docstring.
    return tf_decorator.make_decorator(
        func, new_func, 'deprecated',
        _add_deprecated_function_notice_to_docstring(func.__doc__, date,
                                                     instructions))
  return deprecated_wrapper
# Spec describing one deprecated argument: `position` is its zero-based index
# in the decorated function's signature (-1 until resolved); when
# `has_ok_value` is true, passing exactly `ok_value` suppresses the warning.
DeprecatedArgSpec = collections.namedtuple(
    'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value'])
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples,
                    **kwargs):
  """Decorator for marking specific function arguments as deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called with the deprecated argument. It has the following format:

    Calling <function> (from <module>) with <arg> is deprecated and will be
    removed after <date>. Instructions for updating:
      <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> includes the class name if it is a method.

  It also edits the docstring of the function: ' (deprecated arguments)' is
  appended to the first line of the docstring and a deprecation notice is
  prepended to the rest of the docstring.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
    instructions: String. Instructions on how to update code using the
      deprecated function.
    *deprecated_arg_names_or_tuples: String or 2-Tuple(String,
      [ok_vals]).  The string is the deprecated argument name.
      Optionally, an ok-value may be provided.  If the user provided
      argument equals this value, the warning is suppressed.
    **kwargs: If `warn_once=False` is passed, every call with a deprecated
      argument will log a warning. The default behavior is to only warn the
      first time the function is called with any given deprecated argument.
      All other kwargs raise `ValueError`.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, instructions are
      empty, the deprecated arguments are not present in the function
      signature, the second element of a deprecated_tuple is not a
      list, or if a kwarg other than `warn_once` is passed.
  """
  _validate_deprecation_args(date, instructions)
  if not deprecated_arg_names_or_tuples:
    raise ValueError('Specify which argument is deprecated.')
  if kwargs and list(kwargs.keys()) != ['warn_once']:
    # Pop warn_once first so the error message shows only the truly
    # unexpected keyword arguments.
    kwargs.pop('warn_once', None)
    raise ValueError('Illegal argument to deprecated_args: %s' % kwargs)
  warn_once = kwargs.get('warn_once', True)

  def _get_arg_names_to_ok_vals():
    """Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
    d = {}
    for name_or_tuple in deprecated_arg_names_or_tuples:
      if isinstance(name_or_tuple, tuple):
        d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
      else:
        d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
    return d

  def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
    """Builds a dictionary from deprecated arguments to their spec.

    Returned dict is keyed by argument name.
    Each value is a DeprecatedArgSpec with the following fields:
       position: The zero-based argument position of the argument
         within the signature.  None if the argument isn't found in
         the signature.
       ok_values:  Values of this argument for which warning will be
         suppressed.

    Args:
      names_to_ok_vals: dict from string arg_name to a list of values,
        possibly empty, which should not elicit a warning.
      arg_spec: Output from tf_inspect.getfullargspec on the called function.

    Returns:
      Dictionary from arg_name to DeprecatedArgSpec.
    """
    arg_name_to_pos = {
        name: pos for pos, name in enumerate(arg_spec.args)}
    deprecated_positional_args = {}
    for arg_name, spec in iter(names_to_ok_vals.items()):
      if arg_name in arg_name_to_pos:
        pos = arg_name_to_pos[arg_name]
        deprecated_positional_args[arg_name] = DeprecatedArgSpec(
            pos, spec.has_ok_value, spec.ok_value)
    return deprecated_positional_args

  deprecated_arg_names = _get_arg_names_to_ok_vals()

  def deprecated_wrapper(func):
    """Deprecation decorator."""
    decorator_utils.validate_callable(func, 'deprecated_args')
    arg_spec = tf_inspect.getfullargspec(func)
    deprecated_positions = _get_deprecated_positional_arguments(
        deprecated_arg_names, arg_spec)
    is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
    is_kwargs_deprecated = arg_spec.varkw in deprecated_arg_names
    # Every name the caller declared deprecated must be accounted for either
    # as a positional arg, *varargs, or **kwargs; otherwise it is a typo.
    if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated
        != len(deprecated_arg_names_or_tuples)):
      known_args = arg_spec.args + [arg_spec.varargs, arg_spec.varkw]
      missing_args = [arg_name for arg_name in deprecated_arg_names
                      if arg_name not in known_args]
      raise ValueError('The following deprecated arguments are not present '
                       'in the function signature: %s. '
                       'Found next arguments: %s.' % (missing_args, known_args))

    def _same_value(a, b):
      """A comparison operation that works for multiple object types.

      Returns True for two empty lists, two numeric values with the
      same value, etc.

      Returns False for (pd.DataFrame, None), and other pairs which
      should not be considered equivalent.

      Args:
        a: value one of the comparison.
        b: value two of the comparison.
      Returns:
        A boolean indicating whether the two inputs are the same value
        for the purposes of deprecation.
      """
      if a is b:
        return True
      try:
        equality = a == b
        if isinstance(equality, bool):
          return equality
      except TypeError:
        # Some objects (e.g. arrays) raise or return non-bools from `==`;
        # treat those as "not the same value".
        return False
      return False

    @functools.wraps(func)
    def new_func(*args, **kwargs):
      """Deprecation wrapper."""
      # TODO(apassos) figure out a way to have reasonable performance with
      # deprecation warnings and eager mode.
      if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS:
        invalid_args = []
        named_args = tf_inspect.getcallargs(func, *args, **kwargs)
        for arg_name, spec in iter(deprecated_positions.items()):
          if (spec.position < len(args) and
              not (spec.has_ok_value and
                   _same_value(named_args[arg_name], spec.ok_value))):
            invalid_args.append(arg_name)
        if is_varargs_deprecated and len(args) > len(arg_spec.args):
          invalid_args.append(arg_spec.varargs)
        if is_kwargs_deprecated and kwargs:
          invalid_args.append(arg_spec.varkw)
        # NOTE(review): this loop assumes every deprecated name passed by
        # keyword also appears in deprecated_positions — verify for the
        # varargs/varkw case.
        for arg_name in deprecated_arg_names:
          if (arg_name in kwargs and
              not (deprecated_positions[arg_name].has_ok_value and
                   _same_value(named_args[arg_name],
                               deprecated_positions[arg_name].ok_value))):
            invalid_args.append(arg_name)
        for arg_name in invalid_args:
          # Warnings are deduplicated per (function, argument) pair.
          if (func, arg_name) not in _PRINTED_WARNING:
            if warn_once:
              _PRINTED_WARNING[(func, arg_name)] = True
            logging.warning(
                'From %s: calling %s (from %s) with %s is deprecated and will '
                'be removed %s.\nInstructions for updating:\n%s',
                _call_location(), decorator_utils.get_qualified_name(func),
                func.__module__, arg_name,
                'in a future version' if date is None else ('after %s' % date),
                instructions)
      return func(*args, **kwargs)

    doc = _add_deprecated_arg_notice_to_docstring(
        func.__doc__, date, instructions, sorted(deprecated_arg_names.keys()))
    return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)
  return deprecated_wrapper
def deprecated_arg_values(date, instructions, warn_once=True,
                          **deprecated_kwargs):
  """Decorator for marking specific function argument values as deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called with the deprecated argument values. It has the following format:

    Calling <function> (from <module>) with <arg>=<value> is deprecated and
    will be removed after <date>. Instructions for updating:
      <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> will include the class name if it is a method.

  It also edits the docstring of the function: ' (deprecated arguments)' is
  appended to the first line of the docstring and a deprecation notice is
  prepended to the rest of the docstring.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None
    instructions: String. Instructions on how to update code using the
      deprecated function.
    warn_once: If `True`, warn only the first time this function is called with
      deprecated argument values. Otherwise, every call (with a deprecated
      argument value) will log a warning.
    **deprecated_kwargs: The deprecated argument values.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, or instructions are
      empty.
  """
  _validate_deprecation_args(date, instructions)
  if not deprecated_kwargs:
    raise ValueError('Specify which argument values are deprecated.')

  def deprecated_wrapper(func):
    """Deprecation decorator."""
    decorator_utils.validate_callable(func, 'deprecated_arg_values')
    @functools.wraps(func)
    def new_func(*args, **kwargs):
      """Deprecation wrapper."""
      if _PRINT_DEPRECATION_WARNINGS:
        # Resolve the effective value of every argument (positional or
        # keyword) so deprecated values are caught however they are passed.
        named_args = tf_inspect.getcallargs(func, *args, **kwargs)
        for arg_name, arg_value in deprecated_kwargs.items():
          if arg_name in named_args and named_args[arg_name] == arg_value:
            # Deduplicate warnings per (function, argument) pair.
            if (func, arg_name) not in _PRINTED_WARNING:
              if warn_once:
                _PRINTED_WARNING[(func, arg_name)] = True
              logging.warning(
                  'From %s: calling %s (from %s) with %s=%s is deprecated and '
                  'will be removed %s.\nInstructions for updating:\n%s',
                  _call_location(), decorator_utils.get_qualified_name(func),
                  func.__module__, arg_name, arg_value, 'in a future version'
                  if date is None else ('after %s' % date), instructions)
      return func(*args, **kwargs)
    doc = _add_deprecated_arg_value_notice_to_docstring(
        func.__doc__, date, instructions, deprecated_kwargs)
    return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)
  return deprecated_wrapper
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
  """Looks up deprecated argument name and ensures both are not used.

  Args:
    new_name: new name of argument
    new_value: value of new argument (or None if not used)
    old_name: old name of argument
    old_value: value of old argument (or None if not used)
  Returns:
    The effective argument that should be used.
  Raises:
    ValueError: if new_value and old_value are both non-null
  """
  # Guard clause: when the old-style argument is absent, the new one wins
  # (even when it is also None).
  if old_value is None:
    return new_value
  if new_value is not None:
    raise ValueError("Cannot specify both '%s' and '%s'" %
                     (old_name, new_name))
  return old_value
def rewrite_argument_docstring(old_doc, old_argument, new_argument):
  """Returns old_doc with `old_argument` references renamed to `new_argument`.

  Both the backticked form (`name`) and the Args-section form (name:) are
  rewritten.
  """
  renamed_backticks = old_doc.replace('`%s`' % old_argument,
                                      '`%s`' % new_argument)
  return renamed_backticks.replace('%s:' % old_argument,
                                   '%s:' % new_argument)
@tf_contextlib.contextmanager
def silence():
  """Temporarily silence deprecation warnings.

  Saves the current value of the module-level _PRINT_DEPRECATION_WARNINGS
  flag, disables warnings for the duration of the `with` block, and restores
  the previous value on exit.
  """
  global _PRINT_DEPRECATION_WARNINGS
  print_deprecation_warnings = _PRINT_DEPRECATION_WARNINGS
  _PRINT_DEPRECATION_WARNINGS = False
  try:
    yield
  finally:
    # Bug fix: without try/finally, an exception raised inside the `with`
    # body skipped the restore and left deprecation warnings silenced for
    # the rest of the process.
    _PRINT_DEPRECATION_WARNINGS = print_deprecation_warnings
class HiddenTfApiAttribute(property):
  """Hides a class attribute from the public API.

  Attributes in public classes can be hidden from the API by having an '_' in
  front of the name (e.g. ClassName._variables). This doesn't work when
  attributes or methods are inherited from a parent class. To hide inherited
  attributes, set their values to be `deprecation.hide_attribute_from_api`.
  For example, this is used in V2 Estimator to hide the deprecated
  export_savedmodel method:
    class EstimatorV2(Estimator):
       export_savedmodel = deprecation.hide_attribute_from_api('...')
  """

  def __init__(self, deprecation_message):
    # A property whose getter always raises: any read of the hidden
    # attribute fails with the supplied message.
    def _blocked_getter(unused_self):
      raise AttributeError(deprecation_message)

    property.__init__(self, _blocked_getter)
# Public alias used at attribute-assignment sites (see HiddenTfApiAttribute).
hide_attribute_from_api = HiddenTfApiAttribute  # pylint: disable=invalid-name

# TODO(kathywu): Remove once cl/246395236 is submitted.
HIDDEN_ATTRIBUTE = HiddenTfApiAttribute('This attribute has been deprecated.')
|
fangxingli/hue | refs/heads/master | desktop/core/ext-py/guppy-0.1.10/guppy/etc/__init__.py | 37 | #._cv_part guppy.etc
class _GLUECLAMP_:
    # Guppy glue-clamp class: a _get_<name> method defines how the public
    # attribute <name> of this package object is computed on first access
    # (presumably resolved lazily by guppy's import machinery — verify).

    def _get_iterpermute(self):
        # Expose iterpermute from the sibling IterPermute module.
        return self.IterPermute.iterpermute

    def _get_unpack(self):
        # Expose unpack from the sibling Unpack module.
        return self.Unpack.unpack
|
ktok07b6/polyphony | refs/heads/master | tests/error/seq_capacity01.py | 1 | #Sequence capacity is overflowing
from polyphony import testbench
from polyphony.typing import List, bit
def seq_capacity01(xs:List[bit][4]):
    # Deliberate out-of-bounds access: xs is declared with capacity 4, so
    # reading index 7 must trigger the Polyphony compiler's
    # "sequence capacity is overflowing" error (this file is an error test).
    return xs[7]


@testbench
def test():
    # The literal also exceeds the declared capacity of 4 on purpose.
    data = [0, 1, 1, 0, 1] # type: List[bit][4]
    seq_capacity01(data)


test()
|
bwasti/caffe2 | refs/heads/master | caffe2/python/predictor/mobile_exporter_test.py | 1 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.test_util import TestCase
from caffe2.python import workspace, brew
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
import numpy as np
class TestMobileExporter(TestCase):
    def test_mobile_exporter(self):
        """End-to-end check of mobile_exporter.Export on a LeNet model.

        Builds LeNet, exports init/predict nets, then verifies that both the
        manually-run exported nets and the Predictor interface reproduce the
        reference output of the original model within tight tolerances.
        """
        model = ModelHelper(name="mobile_exporter_test_model")
        # Test LeNet
        brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
        brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
        brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5)
        brew.max_pool(model, 'conv2', 'pool2', kernel=2, stride=2)
        brew.fc(model, 'pool2', 'fc3', dim_in=50 * 4 * 4, dim_out=500)
        brew.relu(model, 'fc3', 'fc3')
        brew.fc(model, 'fc3', 'pred', 500, 10)
        brew.softmax(model, 'pred', 'out')

        # Create our mobile exportable networks
        workspace.RunNetOnce(model.param_init_net)
        init_net, predict_net = mobile_exporter.Export(
            workspace, model.net, model.params
        )

        # Populate the workspace with data
        np_data = np.random.rand(1, 1, 28, 28).astype(np.float32)
        workspace.FeedBlob("data", np_data)

        # Reference run of the original (non-exported) model.
        workspace.CreateNet(model.net)
        workspace.RunNet(model.net)
        ref_out = workspace.FetchBlob("out")

        # Clear the workspace
        workspace.ResetWorkspace()

        # Populate the workspace with data
        workspace.RunNetOnce(init_net)
        # Fake "data" is populated by init_net, we have to replace it
        workspace.FeedBlob("data", np_data)

        # Overwrite the old net
        workspace.CreateNet(predict_net, True)
        workspace.RunNet(predict_net.name)
        manual_run_out = workspace.FetchBlob("out")
        np.testing.assert_allclose(
            ref_out, manual_run_out, atol=1e-10, rtol=1e-10
        )

        # Clear the workspace
        workspace.ResetWorkspace()

        # Predictor interface test (simulates writing to disk)
        predictor = workspace.Predictor(
            init_net.SerializeToString(), predict_net.SerializeToString()
        )

        # Output is a vector of outputs but we only care about the first and only result
        predictor_out = predictor.run([np_data])
        assert len(predictor_out) == 1
        predictor_out = predictor_out[0]

        np.testing.assert_allclose(
            ref_out, predictor_out, atol=1e-10, rtol=1e-10
        )
|
HP-Scale-out-Storage/libstoragemgmt | refs/heads/master | python_binding/lsm/_pluginrunner.py | 2 | # Copyright (C) 2011-2013 Red Hat, Inc.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; If not, see <http://www.gnu.org/licenses/>.
#
# Author: tasleson
import socket
import traceback
import sys
from _common import SocketEOF as _SocketEOF
from lsm import LsmError, error, ErrorNumber
import _transport
from lsm.lsmcli import cmd_line_wrapper
def search_property(lsm_objs, search_key, search_value):
    """
    Filter lsm_objs down to those whose attribute named search_key equals
    search_value; a None search_key returns the input unfiltered.

    This method does not check whether lsm_obj contain requested property.
    The method caller should do the check.
    """
    if search_key is None:
        return lsm_objs
    return [candidate for candidate in lsm_objs
            if getattr(candidate, search_key) == search_value]
class PluginRunner(object):
    """
    Plug-in side common code which uses the passed in plugin to do meaningful
    work.
    """
    @staticmethod
    def _is_number(val):
        """
        Returns True if val is an integer.
        """
        try:
            int(val)
            return True
        except ValueError:
            return False

    def __init__(self, plugin, args):
        # When invoked with a single numeric argument it is a socket file
        # descriptor handed over by the daemon; otherwise fall back to the
        # command-line interface.
        self.cmdline = False
        if len(args) == 2 and PluginRunner._is_number(args[1]):
            try:
                fd = int(args[1])
                self.tp = _transport.TransPort(
                    socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM))

                #At this point we can return errors to the client, so we can
                #inform the client if the plug-in fails to create itself
                try:
                    self.plugin = plugin()
                except Exception as e:
                    exception_info = sys.exc_info()

                    self.tp.send_error(0, -32099,
                                       'Error instantiating plug-in ' + str(e))
                    # NOTE: Python 2-only re-raise syntax preserving the
                    # original traceback.
                    raise exception_info[1], None, exception_info[2]

            except Exception:
                error(traceback.format_exc())
                error('Plug-in exiting.')
                sys.exit(2)
        else:
            self.cmdline = True
            cmd_line_wrapper(plugin)

    def run(self):
        """
        JSON-RPC style request loop: read requests off the transport,
        dispatch them to the plug-in by method name, and reply with the
        result or an error until a graceful plugin_unregister arrives.
        """
        #Don't need to invoke this when running stand alone as a cmdline
        if self.cmdline:
            return

        need_shutdown = False
        msg_id = 0
        try:
            while True:
                try:
                    #result = None
                    msg = self.tp.read_req()

                    method = msg['method']
                    msg_id = msg['id']
                    params = msg['params']

                    #Check to see if this plug-in implements this operation
                    #if not return the expected error.
                    if hasattr(self.plugin, method):
                        if params is None:
                            result = getattr(self.plugin, method)()
                        else:
                            result = getattr(self.plugin, method)(
                                **msg['params'])
                    else:
                        raise LsmError(ErrorNumber.NO_SUPPORT,
                                       "Unsupported operation")

                    self.tp.send_resp(result)

                    # Between register and unregister the plug-in holds
                    # resources, so an abnormal exit must trigger cleanup.
                    if method == 'plugin_register':
                        need_shutdown = True

                    if method == 'plugin_unregister':
                        #This is a graceful plugin_unregister
                        need_shutdown = False
                        self.tp.close()
                        break

                # JSON-RPC error codes: -32700 parse error, -32601 method
                # not found.
                except ValueError as ve:
                    error(traceback.format_exc())
                    self.tp.send_error(msg_id, -32700, str(ve))
                except AttributeError as ae:
                    error(traceback.format_exc())
                    self.tp.send_error(msg_id, -32601, str(ae))
                except LsmError as lsm_err:
                    self.tp.send_error(msg_id, lsm_err.code, lsm_err.msg,
                                       lsm_err.data)

        except _SocketEOF:
            #Client went away and didn't meet our expectations for protocol,
            #this error message should not be seen as it shouldn't be occurring.
            if need_shutdown:
                error('Client went away, exiting plug-in')
        except Exception:
            error("Unhandled exception in plug-in!\n" + traceback.format_exc())

            try:
                self.tp.send_error(msg_id, ErrorNumber.PLUGIN_BUG,
                                   "Unhandled exception in plug-in",
                                   str(traceback.format_exc()))
            except Exception:
                # Best effort only; the transport may already be gone.
                pass
        finally:
            if need_shutdown:
                #Client wasn't nice, we will allow plug-in to cleanup
                self.plugin.plugin_unregister()
                sys.exit(2)
|
adrianholovaty/django | refs/heads/master | tests/regressiontests/fixtures_regress/models.py | 33 | from __future__ import absolute_import
from django.contrib.auth.models import User
from django.db import models
# Fixture-loading regression-test models (Python 2-era Django: __unicode__).
class Animal(models.Model):
    name = models.CharField(max_length=150)
    latin_name = models.CharField(max_length=150)
    count = models.IntegerField()
    weight = models.FloatField()

    # use a non-default name for the default manager
    specimens = models.Manager()

    def __unicode__(self):
        return self.name


class Plant(models.Model):
    name = models.CharField(max_length=150)

    class Meta:
        # For testing when upper case letter in app name; regression for #4057
        db_table = "Fixtures_regress_plant"


class Stuff(models.Model):
    name = models.CharField(max_length=20, null=True)
    owner = models.ForeignKey(User, null=True)

    def __unicode__(self):
        return unicode(self.name) + u' is owned by ' + unicode(self.owner)


class Absolute(models.Model):
    name = models.CharField(max_length=40)

    # Class-level counter: incremented on every instantiation so tests can
    # verify how many objects a fixture load created.
    load_count = 0

    def __init__(self, *args, **kwargs):
        super(Absolute, self).__init__(*args, **kwargs)
        Absolute.load_count += 1


class Parent(models.Model):
    name = models.CharField(max_length=10)

    class Meta:
        ordering = ('id',)


class Child(Parent):
    data = models.CharField(max_length=10)


# Models to regression test #7572
class Channel(models.Model):
    name = models.CharField(max_length=255)


class Article(models.Model):
    title = models.CharField(max_length=255)
    channels = models.ManyToManyField(Channel)

    class Meta:
        ordering = ('id',)


# Models to regression test #11428
class Widget(models.Model):
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name


class WidgetProxy(Widget):
    class Meta:
        proxy = True
# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
    def get_by_natural_key(self, key):
        return self.get(name=key)


class Store(models.Model):
    objects = TestManager()
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    def natural_key(self):
        return (self.name,)


class Person(models.Model):
    objects = TestManager()
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    # Person doesn't actually have a dependency on store, but we need to define
    # one to test the behavior of the dependency resolution algorithm.
    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.store']


class Book(models.Model):
    name = models.CharField(max_length=255)
    author = models.ForeignKey(Person)
    stores = models.ManyToManyField(Store)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return u'%s by %s (available at %s)' % (
            self.name,
            self.author.name,
            ', '.join(s.name for s in self.stores.all())
        )


class NKManager(models.Manager):
    def get_by_natural_key(self, data):
        return self.get(data=data)


class NKChild(Parent):
    data = models.CharField(max_length=10, unique=True)
    objects = NKManager()

    def natural_key(self):
        return self.data

    def __unicode__(self):
        return u'NKChild %s:%s' % (self.name, self.data)


class RefToNKChild(models.Model):
    text = models.CharField(max_length=10)
    nk_fk = models.ForeignKey(NKChild, related_name='ref_fks')
    nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')

    def __unicode__(self):
        return u'%s: Reference to %s [%s]' % (
            self.text,
            self.nk_fk,
            ', '.join(str(o) for o in self.nk_m2m.all())
        )
# Some models with pathological circular dependencies:
# Circle1 <-> Circle2 is a two-node cycle, Circle3 depends on itself, and
# Circle4 -> Circle5 -> Circle6 -> Circle4 is a three-node cycle. All are
# intentional, to exercise the serializer's dependency-resolution fallback.
class Circle1(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle2']


class Circle2(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle1']


class Circle3(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle3']


class Circle4(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle5']


class Circle5(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle6']


class Circle6(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle4']


class ExternalDependency(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.book']


# Model for regression test of #11101
class Thingy(models.Model):
    name = models.CharField(max_length=255)
|
AICP/kernel_lge_msm8974 | refs/heads/mm6.0 | tools/perf/scripts/python/netdev-times.py | 11271 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Module-level state shared by the perf callbacks below.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (filled in by trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
def diff_msec(src, dst):
	"""Return the interval from src(nsec) to dst(nsec) in milliseconds."""
	delta_nsec = dst - src
	return delta_nsec / 1000000.0
# Display a process of transmitting a packet:
# device, length, time queued in Qdisc, time in the net device, then freed.
def print_transmit(hunk):
	# Honor the "dev=" option: skip hunks for other devices.
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))

# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
	show_hunk = 0
	irq_list = hunk['irq_list']
	cpu = irq_list[0]['cpu']
	base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be showed
	if dev != 0:
		for i in range(len(irq_list)):
			if irq_list[i]['name'].find(dev) >= 0:
				show_hunk = 1
				break
	else:
		show_hunk = 1
	if show_hunk == 0:
		return
	# All timestamps below are printed relative to the first irq entry.
	print "%d.%06dsec cpu=%d" % \
		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
	for i in range(len(irq_list)):
		print PF_IRQ_ENTRY % \
			(diff_msec(base_t, irq_list[i]['irq_ent_t']),
			irq_list[i]['irq'], irq_list[i]['name'])
		print PF_JOINT
		irq_event_list = irq_list[i]['event_list']
		for j in range(len(irq_event_list)):
			irq_event = irq_event_list[j]
			if irq_event['event'] == 'netif_rx':
				print PF_NET_RX % \
					(diff_msec(base_t, irq_event['time']),
					irq_event['skbaddr'])
				print PF_JOINT
	print PF_SOFT_ENTRY % \
		diff_msec(base_t, hunk['sirq_ent_t'])
	print PF_JOINT
	event_list = hunk['event_list']
	for i in range(len(event_list)):
		event = event_list[i]
		if event['event_name'] == 'napi_poll':
			print PF_NAPI_POLL % \
				(diff_msec(base_t, event['event_t']), event['dev'])
			if i == len(event_list) - 1:
				print ""
			else:
				print PF_JOINT
		else:
			print PF_NET_RECV % \
				(diff_msec(base_t, event['event_t']), event['skbaddr'],
				event['len'])
			# 'comm' marks packets handed to user space; 'handle'
			# marks packets freed in the kernel.
			if 'comm' in event.keys():
				print PF_WJOINT
				print PF_CPY_DGRAM % \
					(diff_msec(base_t, event['comm_t']),
					event['pid'], event['comm'])
			elif 'handle' in event.keys():
				print PF_WJOINT
				if event['handle'] == "kfree_skb":
					print PF_KFREE_SKB % \
						(diff_msec(base_t,
						event['comm_t']),
						event['location'])
				elif event['handle'] == "consume_skb":
					print PF_CONS_SKB % \
						diff_msec(base_t,
						event['comm_t'])
			print PF_JOINT
def trace_begin():
	# Parse the script options (tx / rx / dev=NAME / debug) from argv.
	global show_tx
	global show_rx
	global dev
	global debug

	for i in range(len(sys.argv)):
		if i == 0:
			continue
		arg = sys.argv[i]
		if arg == 'tx':
			show_tx = 1
		elif arg =='rx':
			show_rx = 1
		elif arg.find('dev=',0, 4) >= 0:
			dev = arg[4:]
		elif arg == 'debug':
			debug = 1
	# Default: show both directions when neither was requested.
	if show_tx == 0 and show_rx == 0:
		show_tx = 1
		show_rx = 1

def trace_end():
	# order all events in time
	all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
					    b[EINFO_IDX_TIME]))
	# process all events
	for i in range(len(all_event_list)):
		event_info = all_event_list[i]
		name = event_info[EINFO_IDX_NAME]
		if name == 'irq__softirq_exit':
			handle_irq_softirq_exit(event_info)
		elif name == 'irq__softirq_entry':
			handle_irq_softirq_entry(event_info)
		elif name == 'irq__softirq_raise':
			handle_irq_softirq_raise(event_info)
		elif name == 'irq__irq_handler_entry':
			handle_irq_handler_entry(event_info)
		elif name == 'irq__irq_handler_exit':
			handle_irq_handler_exit(event_info)
		elif name == 'napi__napi_poll':
			handle_napi_poll(event_info)
		elif name == 'net__netif_receive_skb':
			handle_netif_receive_skb(event_info)
		elif name == 'net__netif_rx':
			handle_netif_rx(event_info)
		elif name == 'skb__skb_copy_datagram_iovec':
			handle_skb_copy_datagram_iovec(event_info)
		elif name == 'net__net_dev_queue':
			handle_net_dev_queue(event_info)
		elif name == 'net__net_dev_xmit':
			handle_net_dev_xmit(event_info)
		elif name == 'skb__kfree_skb':
			handle_kfree_skb(event_info)
		elif name == 'skb__consume_skb':
			handle_consume_skb(event_info)
	# display receive hunks
	if show_rx:
		for i in range(len(receive_hunk_list)):
			print_receive(receive_hunk_list[i])
	# display transmit hunks
	if show_tx:
		print " dev len Qdisc " \
			" netdevice free"
		for i in range(len(tx_free_list)):
			print_transmit(tx_free_list[i])
	if debug:
		print "debug buffer status"
		print "----------------------------"
		print "xmit Qdisc:remain:%d overflow:%d" % \
			(len(tx_queue_list), of_count_tx_queue_list)
		print "xmit netdevice:remain:%d overflow:%d" % \
			(len(tx_xmit_list), of_count_tx_xmit_list)
		print "receive:remain:%d overflow:%d" % \
			(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    """perf callback: queue a softirq_raise event, NET_RX vectors only."""
    # NOTE(review): reuses the entry event's vec symbol table, as the other
    # softirq callbacks do -- confirm the symbols are shared.
    if symbol_str("irq__softirq_entry", "vec", vec) == "NET_RX":
        all_event_list.append(
            (name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    """perf callback: queue a hard irq handler entry event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name))
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    """perf callback: queue a hard irq handler exit event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret))
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    """perf callback: queue a napi_poll event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name))
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    """perf callback: queue a netif_receive_skb event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    """perf callback: queue a netif_rx event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    """perf callback: queue a net_dev_queue (Qdisc enqueue) event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    """perf callback: queue a net_dev_xmit (device transmit) event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, rc, dev_name))
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    """perf callback: queue a kfree_skb (skb drop/free) event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, protocol, location))
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    """perf callback: queue a consume_skb (normal skb release) event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr))
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    """perf callback: queue an skb_copy_datagram_iovec (copy-to-user) event."""
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen))
def handle_irq_handler_entry(event_info):
    """Open a new hard-irq record on this cpu's per-cpu record stack."""
    name, context, cpu, time, pid, comm, irq, irq_name = event_info
    irq_dic.setdefault(cpu, []).append(
        {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time})
def handle_irq_handler_exit(event_info):
    """Close the most recent hard-irq record on this cpu.

    The record is kept only if a NET_RX softirq was raised inside it
    (i.e. it accumulated an 'event_list'); otherwise it is dropped.
    """
    name, context, cpu, time, pid, comm, irq, ret = event_info
    if cpu not in irq_dic:
        return
    record = irq_dic[cpu].pop()
    if record['irq'] != irq:
        return
    record['irq_ext_t'] = time
    # If this irq did not raise a NET_RX softirq, drop it.
    if 'event_list' in record:
        irq_dic[cpu].append(record)
def handle_irq_softirq_raise(event_info):
    """Attach a 'sirq_raise' sub-event to this cpu's current hard-irq record."""
    name, context, cpu, time, pid, comm, vec = event_info
    if not irq_dic.get(cpu):
        return
    record = irq_dic[cpu].pop()
    events = record.get('event_list', [])
    events.append({'time': time, 'event': 'sirq_raise'})
    record['event_list'] = events
    irq_dic[cpu].append(record)
def handle_irq_softirq_entry(event_info):
    """Begin a fresh NET_RX softirq context for this cpu; any previous
    unfinished context for the same cpu is overwritten."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    """Close this cpu's NET_RX softirq context and merge it with the pending
    hard-irq records into one receive hunk.  If either side is missing,
    both are discarded."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0  # 0 doubles as a "no softirq context seen" sentinel
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # sirq_ent_t is only referenced below when event_list was set above.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Record a napi_poll sub-event inside this cpu's NET_RX context."""
    name, context, cpu, time, pid, comm, napi, dev_name = event_info
    if cpu not in net_rx_dic:
        return
    net_rx_dic[cpu]['event_list'].append(
        {'event_name': 'napi_poll', 'dev': dev_name, 'event_t': time})
def handle_netif_rx(event_info):
    """Attach a 'netif_rx' sub-event to this cpu's current hard-irq record."""
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if not irq_dic.get(cpu):
        return
    record = irq_dic[cpu].pop()
    events = record.get('event_list', [])
    events.append({'time': time, 'event': 'netif_rx',
                   'skbaddr': skbaddr, 'skblen': skblen,
                   'dev_name': dev_name})
    record['event_list'] = events
    irq_dic[cpu].append(record)
def handle_netif_receive_skb(event_info):
    """Record a received skb both in the softirq context and in the bounded
    global rx ring used for later matching against skb-free events."""
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in net_rx_dic:
        return
    # The very same dict is shared by both lists so that later updates
    # (e.g. from the skb-free handlers) are visible everywhere.
    rec_data = {'event_name': 'netif_receive_skb',
                'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
    net_rx_dic[cpu]['event_list'].append(rec_data)
    rx_skb_list.insert(0, rec_data)
    if len(rx_skb_list) > buffer_budget:
        rx_skb_list.pop()
        of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Track an skb entering the Qdisc in the bounded tx-queue ring."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    tx_queue_list.insert(0, {'dev': dev_name, 'skbaddr': skbaddr,
                             'len': skblen, 'queue_t': time})
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """Move a successfully transmitted skb from the Qdisc ring to the
    bounded netdevice-xmit ring."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc != 0:  # anything but NETDEV_TX_OK
        return
    for idx, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] != skbaddr:
            continue
        skb['xmit_t'] = time
        tx_xmit_list.insert(0, skb)
        del tx_queue_list[idx]
        if len(tx_xmit_list) > buffer_budget:
            tx_xmit_list.pop()
            of_count_tx_xmit_list += 1
        return
def handle_kfree_skb(event_info):
    """Attribute a kfree_skb to whichever pending tx or rx record matches."""
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    # Freed before it ever hit the device: forget the queued skb.
    for idx, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[idx]
            return
    # Freed after the device transmitted it: transmission is complete.
    for idx, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[idx]
            return
    # Freed on the receive side: record who dropped it and when.
    for idx, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[idx]
            return
def handle_consume_skb(event_info):
    """A consumed skb on the tx path means its transmission finished."""
    name, context, cpu, time, pid, comm, skbaddr = event_info
    for idx, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[idx]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """Userland copied the skb's payload: mark the rx record as delivered."""
    name, context, cpu, time, pid, comm, skbaddr, skblen = event_info
    for idx, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[idx]
            return
|
gonboy/sl4a | refs/heads/master | python/src/Lib/test/test_stringprep.py | 58 | # To fully test this module, we would need a copy of the stringprep tables.
# Since we don't have them, this test checks only a few codepoints.
import unittest
from test import test_support
from stringprep import *
class StringprepTests(unittest.TestCase):
    """Spot-check the stringprep tables on a handful of codepoints."""

    def test(self):
        self.assertTrue(in_table_a1(u"\u0221"))
        self.assertFalse(in_table_a1(u"\u0222"))
        self.assertTrue(in_table_b1(u"\u00ad"))
        self.assertFalse(in_table_b1(u"\u00ae"))
        # These map_table_* checks previously used failUnless(result,
        # expected), which treated the expected value as the assertion
        # *message* and therefore only tested truthiness.  Compare for
        # equality instead.
        self.assertEqual(map_table_b2(u"\u0041"), u"\u0061")
        self.assertEqual(map_table_b2(u"\u0061"), u"\u0061")
        self.assertEqual(map_table_b3(u"\u0041"), u"\u0061")
        self.assertEqual(map_table_b3(u"\u0061"), u"\u0061")
        self.assertTrue(in_table_c11(u"\u0020"))
        self.assertFalse(in_table_c11(u"\u0021"))
        self.assertTrue(in_table_c12(u"\u00a0"))
        self.assertFalse(in_table_c12(u"\u00a1"))
        self.assertTrue(in_table_c11_c12(u"\u00a0"))
        self.assertFalse(in_table_c11_c12(u"\u00a1"))
        self.assertTrue(in_table_c21(u"\u001f"))
        self.assertFalse(in_table_c21(u"\u0020"))
        self.assertTrue(in_table_c22(u"\u009f"))
        self.assertFalse(in_table_c22(u"\u00a0"))
        self.assertTrue(in_table_c21_c22(u"\u009f"))
        self.assertFalse(in_table_c21_c22(u"\u00a0"))
        self.assertTrue(in_table_c3(u"\ue000"))
        self.assertFalse(in_table_c3(u"\uf900"))
        self.assertTrue(in_table_c4(u"\uffff"))
        self.assertFalse(in_table_c4(u"\u0000"))
        self.assertTrue(in_table_c5(u"\ud800"))
        self.assertFalse(in_table_c5(u"\ud7ff"))
        self.assertTrue(in_table_c6(u"\ufff9"))
        self.assertFalse(in_table_c6(u"\ufffe"))
        self.assertTrue(in_table_c7(u"\u2ff0"))
        self.assertFalse(in_table_c7(u"\u2ffc"))
        self.assertTrue(in_table_c8(u"\u0340"))
        self.assertFalse(in_table_c8(u"\u0342"))
        # C.9 is not in the bmp
        # self.assertTrue(in_table_c9(u"\U000E0001"))
        # self.assertFalse(in_table_c8(u"\U000E0002"))
        self.assertTrue(in_table_d1(u"\u05be"))
        self.assertFalse(in_table_d1(u"\u05bf"))
        self.assertTrue(in_table_d2(u"\u0041"))
        self.assertFalse(in_table_d2(u"\u0040"))
# This would generate a hash of all predicates. However, running
# it is quite expensive, and only serves to detect changes in the
# unicode database. Instead, stringprep.py asserts the version of
# the database.
# import hashlib
# predicates = [k for k in dir(stringprep) if k.startswith("in_table")]
# predicates.sort()
# for p in predicates:
# f = getattr(stringprep, p)
# # Collect all BMP code points
# data = ["0"] * 0x10000
# for i in range(0x10000):
# if f(unichr(i)):
# data[i] = "1"
# data = "".join(data)
# h = hashlib.sha1()
# h.update(data)
# print p, h.hexdigest()
def test_main():
    """Run the StringprepTests suite via the legacy test_support driver."""
    test_support.run_unittest(StringprepTests)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    test_main()
|
surhudm/scipy | refs/heads/master | scipy/optimize/tests/test_hungarian.py | 48 | # Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck
# License: BSD
from numpy.testing import assert_array_equal, assert_raises
import numpy as np
from scipy.optimize import linear_sum_assignment
def test_linear_sum_assignment():
    """Check optimal assignments on square/rectangular/empty matrices."""
    cases = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         [150, 400, 300]),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         [150, 2, 300]),
        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         [10, 1, 7]),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         [10, 1, 4]),
        # n == 2, m == 0 matrix
        ([[], []],
         []),
    ]
    for raw_cost, expected_cost in cases:
        cost = np.array(raw_cost)
        # Solve on the matrix itself; row indices come back sorted.
        rows, cols = linear_sum_assignment(cost)
        assert_array_equal(rows, np.sort(rows))
        assert_array_equal(expected_cost, cost[rows, cols])
        # Solve on the transpose; the multiset of selected costs is the same.
        cost = cost.T
        rows, cols = linear_sum_assignment(cost)
        assert_array_equal(rows, np.sort(rows))
        assert_array_equal(np.sort(expected_cost),
                           np.sort(cost[rows, cols]))
def test_linear_sum_assignment_input_validation():
    """1-D input is rejected; list/ndarray/matrix inputs agree."""
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])
    C = [[1, 2, 3], [4, 5, 6]]
    for converted in (np.asarray(C), np.matrix(C)):
        assert_array_equal(linear_sum_assignment(C),
                           linear_sum_assignment(converted))
|
emanuelvianna/microcelery | refs/heads/master | microcelery/microcelery.py | 2 | # coding: utf8
from redis import Redis
import settings
from task import Task
class MicroCelery(object):
    """Minimal Celery-like task registry backed by a Redis list queue."""

    def __init__(self):
        # Registered tasks, keyed by the wrapped function's name.
        self._tasks = {}
        self.rds = Redis()

    def task(self, func):
        """Decorator: wrap ``func`` in a Task, register it, return the Task."""
        wrapped = Task(self, func)
        self._tasks[func.__name__] = wrapped
        return wrapped

    def add_task(self, data):
        """Push serialized task data onto the shared Redis queue."""
        self.rds.rpush(settings.QUEUE_KEY, data)
|
KublaikhanGeek/scrapy | refs/heads/master | scrapy/utils/request.py | 44 | """
This module provides some useful functions for working with
scrapy.http.Request objects
"""
from __future__ import print_function
import hashlib
import weakref
from six.moves.urllib.parse import urlunparse
from w3lib.http import basic_auth_header
from scrapy.utils.python import to_bytes, to_native_str
from scrapy.utils.url import canonicalize_url
from scrapy.utils.httpobj import urlparse_cached
# Per-request fingerprint cache; weak keys let Request objects be collected.
_fingerprint_cache = weakref.WeakKeyDictionary()
def request_fingerprint(request, include_headers=None):
    """
    Return the request fingerprint.

    The request fingerprint is a hash that uniquely identifies the resource the
    request points to. For example, take the following two urls:

    http://www.example.com/query?id=111&cat=222
    http://www.example.com/query?cat=222&id=111

    Even though those are two different URLs both point to the same resource
    and are equivalent (ie. they should return the same response).

    Another example are cookies used to store session ids. Suppose the
    following page is only accessible to authenticated users:

    http://www.example.com/members/offers.html

    Lot of sites use a cookie to store the session id, which adds a random
    component to the HTTP Request and thus should be ignored when calculating
    the fingerprint.

    For this reason, request headers are ignored by default when calculating
    the fingerprint. If you want to include specific headers use the
    include_headers argument, which is a list of Request headers to include.
    """
    if include_headers:
        # Normalize to a sorted tuple of lowercase bytes so that equivalent
        # header lists share one cache slot (and are hashable as a dict key).
        include_headers = tuple(to_bytes(h.lower())
                                for h in sorted(include_headers))
    # One sub-cache per Request instance, keyed by the header selection.
    cache = _fingerprint_cache.setdefault(request, {})
    if include_headers not in cache:
        fp = hashlib.sha1()
        fp.update(to_bytes(request.method))
        fp.update(to_bytes(canonicalize_url(request.url)))
        fp.update(request.body or b'')
        if include_headers:
            for hdr in include_headers:
                if hdr in request.headers:
                    fp.update(hdr)
                    for v in request.headers.getlist(hdr):
                        fp.update(v)
        cache[include_headers] = fp.hexdigest()
    return cache[include_headers]
def request_authenticate(request, username, password):
    """Authenticate the given request (in place) using the HTTP basic access
    authentication mechanism (RFC 2617) and the given username and password.
    """
    auth = basic_auth_header(username, password)
    request.headers['Authorization'] = auth
def request_httprepr(request):
    """Return the raw HTTP representation (as bytes) of the given request.

    This is provided only for reference since it's not the actual stream of
    bytes that will be send when performing the request (that's controlled
    by Twisted).
    """
    parsed = urlparse_cached(request)
    path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))
    # Assemble with a single join rather than repeated bytes concatenation,
    # which copies the accumulated buffer on every +=.
    parts = [
        to_bytes(request.method), b" ", to_bytes(path), b" HTTP/1.1\r\n",
        b"Host: ", to_bytes(parsed.hostname or b''), b"\r\n",
    ]
    if request.headers:
        parts.append(request.headers.to_string())
        parts.append(b"\r\n")
    parts.append(b"\r\n")
    parts.append(request.body)
    return b"".join(parts)
def referer_str(request):
    """ Return Referer HTTP header suitable for logging. """
    referrer = request.headers.get('Referer')
    if referrer is None:
        return None
    return to_native_str(referrer, errors='replace')
|
krisys/django | refs/heads/master | django/contrib/gis/db/models/sql/__init__.py | 476 | from django.contrib.gis.db.models.sql.conversion import (
AreaField, DistanceField, GeomField, GMLField,
)
# Public names re-exported by this package.
__all__ = [
    'AreaField', 'DistanceField', 'GeomField', 'GMLField'
]
|
fernandoacorreia/DjangoWAWSLogging | refs/heads/master | DjangoWAWSLogging/env/Lib/site-packages/django/utils/dateformat.py | 82 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print df.format('jS F Y H:i')
7th October 2003 11:39
>>>
"""
import re
import time
import calendar
import datetime
from django.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
from django.utils.timezone import is_aware, is_naive
# Splits a format string on unescaped single format characters; a preceding
# backslash escapes the character (unescaped later via re_escaped).
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
    """Base formatter: each format character maps to a same-named method on
    the subclass; everything else is emitted literally (after unescaping)."""

    def format(self, formatstr):
        output = []
        tokens = re_formatchars.split(force_unicode(formatstr))
        for index, token in enumerate(tokens):
            if index % 2:
                # Odd split slots are captured format characters.
                output.append(force_unicode(getattr(self, token)()))
            elif token:
                output.append(re_escaped.sub(r'\1', token))
        return u''.join(output)
class TimeFormat(Formatter):
    """Implements the time-of-day format characters (a, A, B, f, g, G, h, H,
    i, P, s, u) over an object exposing .hour/.minute/.second/.microsecond."""

    def __init__(self, t):
        self.data = t

    def a(self):
        "'a.m.' or 'p.m.'"
        if self.data.hour > 11:
            return _('p.m.')
        return _('a.m.')

    def A(self):
        "'AM' or 'PM'"
        if self.data.hour > 11:
            return _('PM')
        return _('AM')

    def B(self):
        "Swatch Internet time"
        # Never implemented; using 'B' in a format string raises.
        raise NotImplementedError

    def f(self):
        """
        Time, in 12-hour hours and minutes, with minutes left off if they're
        zero.
        Examples: '1', '1:30', '2:05', '2'
        Proprietary extension.
        """
        if self.data.minute == 0:
            return self.g()
        return u'%s:%s' % (self.g(), self.i())

    def g(self):
        "Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
        if self.data.hour == 0:
            return 12
        if self.data.hour > 12:
            return self.data.hour - 12
        return self.data.hour

    def G(self):
        "Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
        return self.data.hour

    def h(self):
        "Hour, 12-hour format; i.e. '01' to '12'"
        return u'%02d' % self.g()

    def H(self):
        "Hour, 24-hour format; i.e. '00' to '23'"
        return u'%02d' % self.G()

    def i(self):
        "Minutes; i.e. '00' to '59'"
        return u'%02d' % self.data.minute

    def P(self):
        """
        Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
        if they're zero and the strings 'midnight' and 'noon' if appropriate.
        Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
        Proprietary extension.
        """
        if self.data.minute == 0 and self.data.hour == 0:
            return _('midnight')
        if self.data.minute == 0 and self.data.hour == 12:
            return _('noon')
        return u'%s %s' % (self.f(), self.a())

    def s(self):
        "Seconds; i.e. '00' to '59'"
        return u'%02d' % self.data.second

    def u(self):
        "Microseconds"
        return self.data.microsecond
class DateFormat(TimeFormat):
    """Implements the date-related format characters on top of TimeFormat.

    Accepts either a datetime.datetime or a datetime.date; time-zone-aware
    methods (e, I, O, T, U, Z) fall back gracefully for naive values.
    """

    # Cumulative day counts at the start of each month (1-based index);
    # leap days are compensated for in z().
    year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]

    def __init__(self, dt):
        # Accepts either a datetime or date object.
        self.data = dt
        self.timezone = None
        if isinstance(dt, datetime.datetime):
            if is_naive(dt):
                self.timezone = LocalTimezone(dt)
            else:
                self.timezone = dt.tzinfo

    def b(self):
        "Month, textual, 3 letters, lowercase; e.g. 'jan'"
        return MONTHS_3[self.data.month]

    def c(self):
        """
        ISO 8601 Format
        Example : '2008-01-02T10:30:00.000123'
        """
        return self.data.isoformat()

    def d(self):
        "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
        return u'%02d' % self.data.day

    def D(self):
        "Day of the week, textual, 3 letters; e.g. 'Fri'"
        return WEEKDAYS_ABBR[self.data.weekday()]

    def e(self):
        "Timezone name if available"
        try:
            if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
                # Have to use tzinfo.tzname and not datetime.tzname
                # because datatime.tzname does not expect Unicode
                return self.data.tzinfo.tzname(self.data) or ""
        except NotImplementedError:
            pass
        return ""

    def E(self):
        "Alternative month names as required by some locales. Proprietary extension."
        return MONTHS_ALT[self.data.month]

    def F(self):
        "Month, textual, long; e.g. 'January'"
        return MONTHS[self.data.month]

    def I(self):
        "'1' if Daylight Savings Time, '0' otherwise."
        if self.timezone and self.timezone.dst(self.data):
            return u'1'
        else:
            return u'0'

    def j(self):
        "Day of the month without leading zeros; i.e. '1' to '31'"
        return self.data.day

    def l(self):
        "Day of the week, textual, long; e.g. 'Friday'"
        return WEEKDAYS[self.data.weekday()]

    def L(self):
        "Boolean for whether it is a leap year; i.e. True or False"
        return calendar.isleap(self.data.year)

    def m(self):
        "Month; i.e. '01' to '12'"
        return u'%02d' % self.data.month

    def M(self):
        "Month, textual, 3 letters; e.g. 'Jan'"
        return MONTHS_3[self.data.month].title()

    def n(self):
        "Month without leading zeros; i.e. '1' to '12'"
        return self.data.month

    def N(self):
        "Month abbreviation in Associated Press style. Proprietary extension."
        return MONTHS_AP[self.data.month]

    def o(self):
        "ISO 8601 year number matching the ISO week number (W)"
        return self.data.isocalendar()[0]

    def O(self):
        "Difference to Greenwich time in hours; e.g. '+0200', '-0430'"
        seconds = self.Z()
        sign = '-' if seconds < 0 else '+'
        seconds = abs(seconds)
        return u"%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)

    def r(self):
        "RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
        return self.format('D, j M Y H:i:s O')

    def S(self):
        "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
        if self.data.day in (11, 12, 13): # Special case
            return u'th'
        last = self.data.day % 10
        if last == 1:
            return u'st'
        if last == 2:
            return u'nd'
        if last == 3:
            return u'rd'
        return u'th'

    def t(self):
        "Number of days in the given month; i.e. '28' to '31'"
        return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1]

    def T(self):
        "Time zone of this machine; e.g. 'EST' or 'MDT'"
        name = self.timezone and self.timezone.tzname(self.data) or None
        if name is None:
            name = self.format('O')
        return unicode(name)

    def U(self):
        "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
        if isinstance(self.data, datetime.datetime) and is_aware(self.data):
            return int(calendar.timegm(self.data.utctimetuple()))
        else:
            return int(time.mktime(self.data.timetuple()))

    def w(self):
        "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
        return (self.data.weekday() + 1) % 7

    def W(self):
        "ISO-8601 week number of year, weeks starting on Monday"
        # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
        week_number = None
        jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
        weekday = self.data.weekday() + 1
        day_of_year = self.z()
        if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
            if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
                week_number = 53
            else:
                week_number = 52
        else:
            if calendar.isleap(self.data.year):
                i = 366
            else:
                i = 365
            if (i - day_of_year) < (4 - weekday):
                week_number = 1
            else:
                j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
                week_number = j // 7
                if jan1_weekday > 4:
                    week_number -= 1
        return week_number

    def y(self):
        "Year, 2 digits; e.g. '99'"
        # NOTE(review): for years < 1000 this slice yields fewer than two
        # digits (e.g. 999 -> '9'); a zero-padded format would be safer.
        return unicode(self.data.year)[2:]

    def Y(self):
        "Year, 4 digits; e.g. '1999'"
        return self.data.year

    def z(self):
        "Day of the year; i.e. '0' to '365'"
        doy = self.year_days[self.data.month] + self.data.day
        if self.L() and self.data.month > 2:
            doy += 1
        return doy

    def Z(self):
        """
        Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
        timezones west of UTC is always negative, and for those east of UTC is
        always positive.
        """
        if not self.timezone:
            return 0
        offset = self.timezone.utcoffset(self.data)
        # `offset` is a datetime.timedelta. For negative values (to the west of
        # UTC) only days can be negative (days=-1) and seconds are always
        # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
        # Positive offsets have days=0
        return offset.days * 86400 + offset.seconds
def format(value, format_string):
    "Convenience function: format a date/datetime with DateFormat."
    return DateFormat(value).format(format_string)
def time_format(value, format_string):
    "Convenience function: format a time value with TimeFormat."
    return TimeFormat(value).format(format_string)
|
ondra-novak/chromium.src | refs/heads/nw | chrome/common/extensions/docs/server2/availability_finder.py | 6 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
from api_models import GetNodeCategories
from api_schema_graph import APISchemaGraph
from branch_utility import BranchUtility, ChannelInfo
from compiled_file_system import CompiledFileSystem, SingleFile, Unicode
from extensions_paths import API_PATHS, JSON_TEMPLATES
from features_bundle import FeaturesBundle
from file_system import FileNotFoundError
from schema_util import ProcessSchema
from third_party.json_schema_compiler.memoize import memoize
from third_party.json_schema_compiler.model import UnixName
# Aggregated schema filenames used by older Chrome revisions.
_DEVTOOLS_API = 'devtools_api.json'
_EXTENSION_API = 'extension_api.json'
# The version where api_features.json is first available.
_API_FEATURES_MIN_VERSION = 28
# The version where permission_ and manifest_features.json are available and
# presented in the current format.
_ORIGINAL_FEATURES_MIN_VERSION = 20
# API schemas are aggregated in extension_api.json up to this version.
_EXTENSION_API_MAX_VERSION = 17
# The earliest version for which we have SVN data.
_SVN_MIN_VERSION = 5
def _GetChannelFromFeatures(api_name, features):
'''Finds API channel information for |api_name| from |features|.
Returns None if channel information for the API cannot be located.
'''
feature = features.Get().get(api_name)
return feature.get('channel') if feature else None
def _GetChannelFromAPIFeatures(api_name, features_bundle):
  # Channel for |api_name| according to _api_features.json, or None.
  return _GetChannelFromFeatures(api_name, features_bundle.GetAPIFeatures())
def _GetChannelFromManifestFeatures(api_name, features_bundle):
  # Channel for |api_name| according to _manifest_features.json, or None.
  # _manifest_features.json uses unix_style API names.
  api_name = UnixName(api_name)
  return _GetChannelFromFeatures(api_name,
                                 features_bundle.GetManifestFeatures())
def _GetChannelFromPermissionFeatures(api_name, features_bundle):
  # Channel for |api_name| according to _permission_features.json, or None.
  return _GetChannelFromFeatures(api_name,
                                 features_bundle.GetPermissionFeatures())
def _GetAPISchemaFilename(api_name, file_system, version):
  '''Gets the name of the file which may contain the schema for |api_name| in
  |file_system|, or None if the API is not found. Note that this may be the
  single _EXTENSION_API file which all APIs share in older versions of Chrome,
  in which case it is unknown whether the API actually exists there.
  '''
  if version == 'trunk' or version > _ORIGINAL_FEATURES_MIN_VERSION:
    # API schema filenames switch format to unix_hacker_style.
    api_name = UnixName(api_name)
  # Devtools API names have 'devtools.' prepended to them.
  # The corresponding filenames do not.
  if 'devtools_' in api_name:
    api_name = api_name.replace('devtools_', '')
  for api_path in API_PATHS:
    try:
      # Prefer a dedicated per-API .json/.idl file; fall back to the
      # aggregated _EXTENSION_API file if one is present in the directory.
      for base, _, filenames in file_system.Walk(api_path):
        for ext in ('json', 'idl'):
          filename = '%s.%s' % (api_name, ext)
          if filename in filenames:
            return posixpath.join(api_path, base, filename)
        if _EXTENSION_API in filenames:
          return posixpath.join(api_path, base, _EXTENSION_API)
    except FileNotFoundError:
      # NOTE(review): this is the project's FileNotFoundError from
      # file_system, not the builtin; missing api paths are simply skipped.
      continue
  return None
class AvailabilityInfo(object):
  '''Availability data for an API. |channel_info| is the ChannelInfo of the
  earliest channel the API appears in; |scheduled| is the version at which a
  dev/beta API is expected to reach stable, or None when that is unknown.
  '''
  def __init__(self, channel_info, scheduled=None):
    assert isinstance(channel_info, ChannelInfo)
    assert scheduled is None or isinstance(scheduled, int)
    self.channel_info = channel_info
    self.scheduled = scheduled

  def __eq__(self, other):
    return self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not self.__eq__(other)

  def __repr__(self):
    return '%s%s' % (type(self).__name__, repr(self.__dict__))

  def __str__(self):
    return repr(self)
class AvailabilityFinder(object):
'''Generates availability information for APIs by looking at API schemas and
_features files over multiple release versions of Chrome.
'''
def __init__(self,
             branch_utility,
             compiled_fs_factory,
             file_system_iterator,
             host_file_system,
             object_store_creator,
             platform):
  '''Stores the injected collaborators and creates the per-platform object
  stores and the JSON compiled-file-system used for availability lookups.
  '''
  self._branch_utility = branch_utility
  self._compiled_fs_factory = compiled_fs_factory
  self._file_system_iterator = file_system_iterator
  self._host_file_system = host_file_system
  self._object_store_creator = object_store_creator
  # Object store categories are namespaced by platform so that different
  # platforms cache availability results independently.
  def create_object_store(category):
    return object_store_creator.Create(
        AvailabilityFinder, category='/'.join((platform, category)))
  self._top_level_object_store = create_object_store('top_level')
  self._node_level_object_store = create_object_store('node_level')
  self._json_fs = compiled_fs_factory.ForJson(self._host_file_system)
  self._platform = platform
def _GetPredeterminedAvailability(self, api_name):
  '''Looks up hardcoded (i.e. predetermined) availability for |api_name| in
  the api_availabilities.json configuration file.

  Returns an AvailabilityInfo, or None when the API has no entry there.
  '''
  config = self._json_fs.GetFromFile(
      JSON_TEMPLATES + 'api_availabilities.json').Get()
  api_info = config.get(api_name)
  if api_info is None:
    return None
  if api_info['channel'] == 'stable':
    channel_info = self._branch_utility.GetStableChannelInfo(
        api_info['version'])
  else:
    channel_info = self._branch_utility.GetChannelInfo(api_info['channel'])
  return AvailabilityInfo(channel_info)
@memoize
def _CreateAPISchemaFileSystem(self, file_system):
  '''Creates a CompiledFileSystem for parsing raw JSON or IDL API schema
  data and formatting it so that it can be used to create APISchemaGraphs.

  Memoized so each underlying file system gets exactly one compiled view.
  '''
  # When processing the API schemas, we retain inlined types in the schema
  # so that there are not missing nodes in the APISchemaGraphs when trying
  # to lookup availability.
  def process_schema(path, data):
    return ProcessSchema(path, data, retain_inlined_types=True)
  return self._compiled_fs_factory.Create(file_system,
                                          SingleFile(Unicode(process_schema)),
                                          CompiledFileSystem,
                                          category='api-schema')
def _GetAPISchema(self, api_name, file_system, version):
  '''Searches |file_system| for |api_name|'s processed API schema.

  Returns a single-element list holding the matching namespace, or None
  when no schema file (or no matching namespace) exists.
  '''
  schema_path = _GetAPISchemaFilename(api_name, file_system, version)
  if schema_path is None:
    # No file for the API could be found in the given |file_system|.
    return None
  schema_fs = self._CreateAPISchemaFileSystem(file_system)
  namespaces = schema_fs.GetFromFile(schema_path).Get()
  matches = [namespace for namespace in namespaces
             if namespace['namespace'] == api_name]
  # A schema file holds at most one namespace per API; the aggregated
  # _EXTENSION_API file may hold none for this particular API.
  assert len(matches) <= 1
  return matches or None
def _HasAPISchema(self, api_name, file_system, version):
  '''Returns True if an API schema for |api_name| exists in |file_system|.'''
  filename = _GetAPISchemaFilename(api_name, file_system, version)
  if filename is None:
    return False
  # Aggregated schema files may exist without actually containing this API,
  # so they need a full parse; a dedicated file is proof by itself.
  if filename.endswith((_EXTENSION_API, _DEVTOOLS_API)):
    return self._GetAPISchema(api_name, file_system, version) is not None
  return True
def _CheckStableAvailability(self,
                             api_name,
                             file_system,
                             version,
                             earliest_version=None):
  '''Checks for availability of an API, |api_name|, on the stable channel.
  Considers several _features.json files, file system existence, and
  extension_api.json depending on the given |version|.
  |earliest_version| is the version of Chrome at which |api_name| first became
  available. It should only be given when checking stable availability for
  API nodes, so it can be used as an alternative to the check for filesystem
  existence.
  '''
  earliest_version = earliest_version or _SVN_MIN_VERSION
  if version < earliest_version:
    # SVN data isn't available below this version.
    return False
  features_bundle = self._CreateFeaturesBundle(file_system)
  available_channel = None
  if version >= _API_FEATURES_MIN_VERSION:
    # The _api_features.json file first appears in version 28 and should be
    # the most reliable for finding API availability.
    available_channel = _GetChannelFromAPIFeatures(api_name,
                                                   features_bundle)
  if version >= _ORIGINAL_FEATURES_MIN_VERSION:
    # The _permission_features.json and _manifest_features.json files are
    # present in Chrome 20 and onwards. Use these if no information could be
    # found using _api_features.json.
    available_channel = (
        available_channel or
        _GetChannelFromPermissionFeatures(api_name, features_bundle) or
        _GetChannelFromManifestFeatures(api_name, features_bundle))
  if available_channel is not None:
    return available_channel == 'stable'
  # |earliest_version| == _SVN_MIN_VERSION implies we're dealing with an API.
  # Fall back to a check for file system existence if the API is not
  # stable in any of the _features.json files, or if the _features files
  # do not exist (version 19 and earlier).
  if earliest_version == _SVN_MIN_VERSION:
    return self._HasAPISchema(api_name, file_system, version)
  # For API nodes, assume it's available if |version| is greater than the
  # version the node became available (which it is, because of the first
  # check).
  return True
def _CheckChannelAvailability(self, api_name, file_system, channel_info):
  '''Searches through the _features files in a given |file_system|, falling
  back to checking the file system for API schema existence, to determine
  whether or not an API is available on the given channel, |channel_info|.
  Returns True iff the API is available on |channel_info|'s channel.
  '''
  features_bundle = self._CreateFeaturesBundle(file_system)
  # The three _features files are consulted in decreasing order of
  # reliability; the first one that knows about the API wins.
  available_channel = (
      _GetChannelFromAPIFeatures(api_name, features_bundle) or
      _GetChannelFromPermissionFeatures(api_name, features_bundle) or
      _GetChannelFromManifestFeatures(api_name, features_bundle))
  if (available_channel is None and
      self._HasAPISchema(api_name, file_system, channel_info.version)):
    # If an API is not represented in any of the _features files, but exists
    # in the filesystem, then assume it is available in this version.
    # The chrome.windows API is an example of this.
    available_channel = channel_info.channel
  # If the channel we're checking is the same as or newer than the
  # |available_channel| then the API is available at this channel.
  newest = BranchUtility.NewestChannel((available_channel,
                                        channel_info.channel))
  return available_channel is not None and newest == channel_info.channel
def _CheckChannelAvailabilityForNode(self,
                                     node_name,
                                     file_system,
                                     channel_info,
                                     earliest_channel_info):
  '''Searches through the _features files in a given |file_system| to
  determine whether or not an API node is available on the given channel,
  |channel_info|. |earliest_channel_info| is the earliest channel the node
  was introduced.
  Returns True iff the node is available on |channel_info|'s channel.
  '''
  features_bundle = self._CreateFeaturesBundle(file_system)
  available_channel = None
  # Only API nodes can have their availability overriden on a per-node basis,
  # so we only need to check _api_features.json.
  if channel_info.version >= _API_FEATURES_MIN_VERSION:
    available_channel = _GetChannelFromAPIFeatures(node_name, features_bundle)
  if (available_channel is None and
      channel_info.version >= earliest_channel_info.version):
    # Most API nodes inherit their availabiltity from their parent, so don't
    # explicitly appear in _api_features.json. For example, "tabs.create"
    # isn't listed; it inherits from "tabs". Assume these are available at
    # |channel_info|.
    available_channel = channel_info.channel
  # Available iff the channel we are asking about is at least as new as the
  # channel the node became available on.
  newest = BranchUtility.NewestChannel((available_channel,
                                        channel_info.channel))
  return available_channel is not None and newest == channel_info.channel
@memoize
def _CreateFeaturesBundle(self, file_system):
  '''Returns a FeaturesBundle for |file_system|; memoized so repeated
  availability checks against the same file system reuse one bundle.
  '''
  return FeaturesBundle(file_system,
                        self._compiled_fs_factory,
                        self._object_store_creator,
                        self._platform)
def _CheckAPIAvailability(self, api_name, file_system, channel_info):
'''Determines the availability for an API at a certain version of Chrome.
Two branches of logic are used depending on whether or not the API is
determined to be 'stable' at the given version.
'''
if channel_info.channel == 'stable':
return self._CheckStableAvailability(api_name,
file_system,
channel_info.version)
return self._CheckChannelAvailability(api_name,
file_system,
channel_info)
def _FindScheduled(self, api_name, earliest_version=None):
  '''Determines the earliest version of Chrome where the API is stable.
  Unlike the code in GetAPIAvailability, this checks if the API is stable
  even when Chrome is in dev or beta, which shows that the API is scheduled
  to be stable in that verison of Chrome. |earliest_version| is the version
  |api_name| became first available. Only use it when finding scheduled
  availability for nodes.
  Returns the scheduled stable version number, or None if not scheduled.
  '''
  def check_scheduled(file_system, channel_info):
    # Stop descending as soon as the API tests stable at this version.
    return self._CheckStableAvailability(api_name,
                                         file_system,
                                         channel_info.version,
                                         earliest_version=earliest_version)
  stable_channel = self._file_system_iterator.Descending(
      self._branch_utility.GetChannelInfo('dev'), check_scheduled)
  return stable_channel.version if stable_channel else None
def _CheckAPINodeAvailability(self, node_name, earliest_channel_info):
  '''Gets availability data for a node by checking _features files.
  Returns an AvailabilityInfo for |node_name| whose channel_info is the
  oldest channel the node is available on (falling back to
  |earliest_channel_info|), with scheduled stable data when not yet stable.
  '''
  def check_node_availability(file_system, channel_info):
    return self._CheckChannelAvailabilityForNode(node_name,
                                                 file_system,
                                                 channel_info,
                                                 earliest_channel_info)
  # Descend from 'dev' to find the oldest channel where the node is still
  # available; if the search fails, use the caller-supplied earliest info.
  channel_info = (self._file_system_iterator.Descending(
      self._branch_utility.GetChannelInfo('dev'), check_node_availability) or
      earliest_channel_info)
  if channel_info.channel == 'stable':
    scheduled = None
  else:
    scheduled = self._FindScheduled(
        node_name,
        earliest_version=earliest_channel_info.version)
  return AvailabilityInfo(channel_info, scheduled=scheduled)
def GetAPIAvailability(self, api_name):
  '''Performs a search for an API's top-level availability by using a
  HostFileSystemIterator instance to traverse multiple version of the
  SVN filesystem.
  Returns an AvailabilityInfo; results are cached in the top-level object
  store, so repeated queries for the same API are cheap.
  '''
  availability = self._top_level_object_store.Get(api_name).Get()
  if availability is not None:
    return availability
  # Check for predetermined availability and cache this information if found.
  availability = self._GetPredeterminedAvailability(api_name)
  if availability is not None:
    self._top_level_object_store.Set(api_name, availability)
    return availability
  def check_api_availability(file_system, channel_info):
    return self._CheckAPIAvailability(api_name, file_system, channel_info)
  # Descend from 'dev' to find the oldest channel the API is available on.
  channel_info = self._file_system_iterator.Descending(
      self._branch_utility.GetChannelInfo('dev'),
      check_api_availability)
  if channel_info is None:
    # The API wasn't available on 'dev', so it must be a 'trunk'-only API.
    channel_info = self._branch_utility.GetChannelInfo('trunk')
  # If the API is not stable, check when it will be scheduled to be stable.
  if channel_info.channel == 'stable':
    scheduled = None
  else:
    scheduled = self._FindScheduled(api_name)
  availability = AvailabilityInfo(channel_info, scheduled=scheduled)
  self._top_level_object_store.Set(api_name, availability)
  return availability
def GetAPINodeAvailability(self, api_name):
  '''Returns an APISchemaGraph annotated with each node's availability (the
  ChannelInfo at the oldest channel it's available in).
  Results are cached in the node-level object store.
  '''
  availability_graph = self._node_level_object_store.Get(api_name).Get()
  if availability_graph is not None:
    return availability_graph

  def assert_not_none(value):
    assert value is not None
    return value

  availability_graph = APISchemaGraph()
  host_fs = self._host_file_system
  trunk_stat = assert_not_none(host_fs.Stat(_GetAPISchemaFilename(
      api_name, host_fs, 'trunk')))

  # Weird object thing here because nonlocal is Python 3.
  previous = type('previous', (object,), {'stat': None, 'graph': None})

  def update_availability_graph(file_system, channel_info):
    # If we can't find a filename, skip checking at this branch.
    # For example, something could have a predetermined availability of 23,
    # but it doesn't show up in the file system until 26.
    # We know that the file will become available at some point.
    #
    # The problem with this is that at the first version where the API file
    # exists, we'll get a huge chunk of new objects that don't match
    # the predetermined API availability.
    version_filename = _GetAPISchemaFilename(api_name,
                                             file_system,
                                             channel_info.version)
    if version_filename is None:
      # Continue the loop at the next version.
      return True
    version_stat = assert_not_none(file_system.Stat(version_filename))
    # Important optimisation: only re-parse the graph if the file changed in
    # the last revision. Parsing the same schema and forming a graph on every
    # iteration is really expensive.
    if version_stat == previous.stat:
      version_graph = previous.graph
    else:
      # Keep track of any new schema elements from this version by adding
      # them to |availability_graph|.
      #
      # Calling |availability_graph|.Lookup() on the nodes being updated
      # will return the |annotation| object -- the current |channel_info|.
      version_graph = APISchemaGraph(
          api_schema=self._GetAPISchema(api_name,
                                        file_system,
                                        channel_info.version))

      def annotator(node_name):
        return self._CheckAPINodeAvailability('%s.%s' % (api_name, node_name),
                                              channel_info)

      availability_graph.Update(version_graph.Subtract(availability_graph),
                                annotator)
    previous.stat = version_stat
    previous.graph = version_graph
    # Continue looping until there are no longer differences between this
    # version and trunk.
    return version_stat != trunk_stat

  self._file_system_iterator.Ascending(
      self.GetAPIAvailability(api_name).channel_info,
      update_availability_graph)

  self._node_level_object_store.Set(api_name, availability_graph)
  return availability_graph
|
DirtyUnicorns/android_external_chromium-org | refs/heads/kitkat | third_party/bintrees/bintrees/__init__.py | 156 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: binary trees package
# Created: 03.05.2010
# Copyright (c) 2010-2013 by Manfred Moitzi
# License: MIT License
from __future__ import absolute_import
__doc__ = """
Binary Tree Package
===================
Python Trees
------------
Balanced and unbalanced binary trees written in pure Python with a dict-like API.
Classes
~~~~~~~
* BinaryTree -- unbalanced binary tree
* AVLTree -- balanced AVL-Tree
* RBTree -- balanced Red-Black-Tree
Cython Trees
------------
Basic tree functions written in Cython, merged with TreeMixin to provide the
full API of the Python Trees.
Classes
~~~~~~~
* FastBinaryTree -- unbalanced binary tree
* FastAVLTree -- balanced AVLTree
* FastRBTree -- balanced Red-Black-Tree
Overview of API for all Classes
===============================
* TreeClass ([compare]) -> new empty tree.
* TreeClass(mapping, [compare]) -> new tree initialized from a mapping
* TreeClass(seq, [compare]) -> new tree initialized from seq [(k1, v1), (k2, v2), ... (kn, vn)]
Methods
-------
* __contains__(k) -> True if T has a key k, else False, O(log(n))
* __delitem__(y) <==> del T[y], O(log(n))
* __getitem__(y) <==> T[y], O(log(n))
* __iter__() <==> iter(T)
* __len__() <==> len(T), O(1)
* __max__() <==> max(T), get max item (k,v) of T, O(log(n))
* __min__() <==> min(T), get min item (k,v) of T, O(log(n))
* __and__(other) <==> T & other, intersection
* __or__(other) <==> T | other, union
* __sub__(other) <==> T - other, difference
* __xor__(other) <==> T ^ other, symmetric_difference
* __repr__() <==> repr(T)
* __setitem__(k, v) <==> T[k] = v, O(log(n))
* clear() -> None, Remove all items from T, , O(n)
* copy() -> a shallow copy of T, O(n*log(n))
* discard(k) -> None, remove k from T, if k is present, O(log(n))
* get(k[,d]) -> T[k] if k in T, else d, O(log(n))
* is_empty() -> True if len(T) == 0, O(1)
* items([reverse]) -> list of T's (k, v) pairs, as 2-tuples, O(n)
* keys([reverse]) -> list of T's keys, O(n)
* pop(k[,d]) -> v, remove specified key and return the corresponding value, O(log(n))
* popitem() -> (k, v), remove and return some (key, value) pair as a 2-tuple, O(log(n))
* setdefault(k[,d]) -> T.get(k, d), also set T[k]=d if k not in T, O(log(n))
* update(E) -> None. Update T from dict/iterable E, O(E*log(n))
* values([reverse]) -> list of T's values, O(n)
walk forward/backward, O(log(n))
* prev_item(key) -> get (k, v) pair, where k is predecessor to key, O(log(n))
* prev_key(key) -> k, get the predecessor of key, O(log(n))
* succ_item(key) -> get (k,v) pair as a 2-tuple, where k is successor to key, O(log(n))
* succ_key(key) -> k, get the successor of key, O(log(n))
slicing by keys
* itemslice(s, e) -> generator for (k, v) items of T for s <= key < e, O(n)
* keyslice(s, e) -> generator for keys of T for s <= key < e, O(n)
* valueslice(s, e) -> generator for values of T for s <= key < e, O(n)
* T[s:e] -> TreeSlice object, with keys in range s <= key < e, O(n)
* del T[s:e] -> remove items by key slicing, for s <= key < e, O(n)
if 's' is None or T[:e] TreeSlice/iterator starts with value of min_key()
if 'e' is None or T[s:] TreeSlice/iterator ends with value of max_key()
T[:] is a TreeSlice which represents the whole tree.
TreeSlice is a tree wrapper with range check, and contains no references
to objects, deleting objects in the associated tree also deletes the object
in the TreeSlice.
* TreeSlice[k] -> get value for key k, raises KeyError if k not exists in range s:e
* TreeSlice[s1:e1] -> TreeSlice object, with keys in range s1 <= key < e1
* new lower bound is max(s, s1)
* new upper bound is min(e, e1)
TreeSlice methods:
* items() -> generator for (k, v) items of T, O(n)
* keys() -> generator for keys of T, O(n)
* values() -> generator for values of T, O(n)
* __iter__ <==> keys()
* __repr__ <==> repr(T)
* __contains__(key)-> True if TreeSlice has a key k, else False, O(log(n))
Heap methods
* max_item() -> get biggest (key, value) pair of T, O(log(n))
* max_key() -> get biggest key of T, O(log(n))
* min_item() -> get smallest (key, value) pair of T, O(log(n))
* min_key() -> get smallest key of T, O(log(n))
* pop_min() -> (k, v), remove item with minimum key, O(log(n))
* pop_max() -> (k, v), remove item with maximum key, O(log(n))
* nlargest(i[,pop]) -> get list of i largest items (k, v), O(i*log(n))
* nsmallest(i[,pop]) -> get list of i smallest items (k, v), O(i*log(n))
Set methods (using frozenset)
* intersection(t1, t2, ...) -> Tree with keys *common* to all trees
* union(t1, t2, ...) -> Tree with keys from *either* trees
* difference(t1, t2, ...) -> Tree with keys in T but not any of t1, t2, ...
* symmetric_difference(t1) -> Tree with keys in either T and t1 but not both
* issubset(S) -> True if every element in T is in S
* issuperset(S) -> True if every element in S is in T
* isdisjoint(S) -> True if T has a null intersection with S
Classmethods
* fromkeys(S[,v]) -> New tree with keys from S and values equal to v.
"""
# Public API: the Cython-accelerated trees (which silently fall back to the
# pure Python implementations when the extensions are unavailable) plus the
# pure Python trees themselves.
__all__ = [
    'FastBinaryTree',
    'FastAVLTree',
    'FastRBTree',
    'BinaryTree',
    'AVLTree',
    'RBTree'
]
from .treemixin import TreeMixin
from .bintree import BinaryTree
from .avltree import AVLTree
from .rbtree import RBTree
try:
    from .qbintree import cBinaryTree

    class FastBinaryTree(cBinaryTree, TreeMixin):
        """ Faster unbalanced binary tree written in Cython with C-Code. """
except (ImportError, ValueError):
    # ImportError: Cython extension not built; ValueError: raised on pypy.
    # Both cases were previously handled by two duplicated except clauses;
    # either way, fall back to the pure Python version.
    FastBinaryTree = BinaryTree
try:
    from .qavltree import cAVLTree

    class FastAVLTree(cAVLTree, TreeMixin):
        """ Faster balanced AVL-Tree written in Cython with C-Code. """
except (ImportError, ValueError):
    # ImportError: Cython extension not built; ValueError: raised on pypy.
    # Both cases were previously handled by two duplicated except clauses;
    # either way, fall back to the pure Python version.
    FastAVLTree = AVLTree
try:
    from .qrbtree import cRBTree

    class FastRBTree(cRBTree, TreeMixin):
        """ Faster balanced Red-Black-Tree written in Cython with C-Code. """
except (ImportError, ValueError):
    # ImportError: Cython extension not built; ValueError: raised on pypy.
    # Both cases were previously handled by two duplicated except clauses;
    # either way, fall back to the pure Python version.
    FastRBTree = RBTree
|
lavvy/osmc | refs/heads/master | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x003.py | 246 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'a', # 0x63
'e', # 0x64
'i', # 0x65
'o', # 0x66
'u', # 0x67
'c', # 0x68
'd', # 0x69
'h', # 0x6a
'm', # 0x6b
'r', # 0x6c
't', # 0x6d
'v', # 0x6e
'x', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'\'', # 0x74
',', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'?', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'', # 0x84
'', # 0x85
'A', # 0x86
';', # 0x87
'E', # 0x88
'E', # 0x89
'I', # 0x8a
'[?]', # 0x8b
'O', # 0x8c
'[?]', # 0x8d
'U', # 0x8e
'O', # 0x8f
'I', # 0x90
'A', # 0x91
'B', # 0x92
'G', # 0x93
'D', # 0x94
'E', # 0x95
'Z', # 0x96
'E', # 0x97
'Th', # 0x98
'I', # 0x99
'K', # 0x9a
'L', # 0x9b
'M', # 0x9c
'N', # 0x9d
'Ks', # 0x9e
'O', # 0x9f
'P', # 0xa0
'R', # 0xa1
'[?]', # 0xa2
'S', # 0xa3
'T', # 0xa4
'U', # 0xa5
'Ph', # 0xa6
'Kh', # 0xa7
'Ps', # 0xa8
'O', # 0xa9
'I', # 0xaa
'U', # 0xab
'a', # 0xac
'e', # 0xad
'e', # 0xae
'i', # 0xaf
'u', # 0xb0
'a', # 0xb1
'b', # 0xb2
'g', # 0xb3
'd', # 0xb4
'e', # 0xb5
'z', # 0xb6
'e', # 0xb7
'th', # 0xb8
'i', # 0xb9
'k', # 0xba
'l', # 0xbb
'm', # 0xbc
'n', # 0xbd
'x', # 0xbe
'o', # 0xbf
'p', # 0xc0
'r', # 0xc1
's', # 0xc2
's', # 0xc3
't', # 0xc4
'u', # 0xc5
'ph', # 0xc6
'kh', # 0xc7
'ps', # 0xc8
'o', # 0xc9
'i', # 0xca
'u', # 0xcb
'o', # 0xcc
'u', # 0xcd
'o', # 0xce
'[?]', # 0xcf
'b', # 0xd0
'th', # 0xd1
'U', # 0xd2
'U', # 0xd3
'U', # 0xd4
'ph', # 0xd5
'p', # 0xd6
'&', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'St', # 0xda
'st', # 0xdb
'W', # 0xdc
'w', # 0xdd
'Q', # 0xde
'q', # 0xdf
'Sp', # 0xe0
'sp', # 0xe1
'Sh', # 0xe2
'sh', # 0xe3
'F', # 0xe4
'f', # 0xe5
'Kh', # 0xe6
'kh', # 0xe7
'H', # 0xe8
'h', # 0xe9
'G', # 0xea
'g', # 0xeb
'CH', # 0xec
'ch', # 0xed
'Ti', # 0xee
'ti', # 0xef
'k', # 0xf0
'r', # 0xf1
'c', # 0xf2
'j', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
baharev/sdopt-tearing | refs/heads/master | ilp_tear.py | 1 | # Copyright (C) 2014, 2015 University of Vienna
# All rights reserved.
# BSD license.
# Author: Ali Baharev <ali.baharev@gmail.com>
from __future__ import division, print_function
from contextlib import contextmanager
from time import time
from six import iteritems, itervalues
from gurobipy import LinExpr, GRB, Model, setParam
import networkx as nx
from benchmarks import gen_benchmark_digraphs, digraph_to_undirected_bipartite,\
gen_digraphs_as_rectangular_bipartite
from equations import info_on_bipartite_graph, read_bipartite_graph
from heap_md import min_degree
from mfes import run_mfes_heuristic
from order_util import deterministic_topological_sort, permute_to_hessenberg
from test_tearing import gen_testproblems
from utils import edges_of_cycle, rotate_min_to_first_pos, solve_ilp
from plot_ordering import plot_hessenberg
__all__ = [ 'solve_problem' ]
# Forked from grb_tear
#
# Here (and also in grb_tear), unlike in other modules, a loop is just a simple
# path of nodes. Elsewhere, the loops are represented as sequence of edges.
def log(*args, **kwargs): pass  # no-op logger, kept so logging can be silenced
log = print  # logging is currently enabled; comment this line out to silence
def main():
    """Demo driver: configure Gurobi logging, then run the test problems."""
    setParam('LogFile', '/tmp/gurobi.log')
    #setParam('OutputFlag', 0)
    setParam('LogToConsole', 0)
    #
    solve_digraphs_as_rectangular_bipartite()
    real_main(use_min_degree=False)
    real_main(use_min_degree=True)
def solve_digraphs_as_rectangular_bipartite():
    """Run the solver on the benchmark digraphs, with no forbidden matches."""
    for g, eqs in gen_digraphs_as_rectangular_bipartite():
        forbidden = set()
        solve_problem(g, eqs, forbidden)
def real_main(use_min_degree):
    """Run the solver on the Jacobsen problem, the generated test problems,
    and a truncated benchmark digraph known to trigger poor performance.
    """
    #pname = 'JacobsenShortestSimpBounds'
    pname = 'JacobsenILOSimpBounds'
    g, eqs, forbidden = read_bipartite_graph(pname)
    #feasible_solution = read_feasible_solution(pname, g, eqs, forbidden)
    #
    solve_problem(g, eqs, forbidden, use_min_degree)
    #
    for g, eqs, forbidden in gen_testproblems():
        info_on_bipartite_graph(g, eqs, forbidden, log=log)
        solve_problem(g, eqs, forbidden, use_min_degree)
    #
    # A bipartite graph, triggering poor performance
    dig = next( gen_benchmark_digraphs() )
    for n in dig.nodes():
        if n > 20:  # keep only a small subgraph of the benchmark
            dig.remove_node(n)
    g, eqs, forbidden = digraph_to_undirected_bipartite(dig)
    solve_problem(g, eqs, forbidden)
def solve_problem(g, eqs, forbidden, use_min_degree=True):
    # Returns rowp, colp, matches, tear_set, sink_set in Hessenberg form.
    # Thin timing wrapper around solve_with_pcm.
    start = time()
    ret = solve_with_pcm(g, eqs, forbidden, use_min_degree)
    end = time()
    log('Overall solution time: {0:0.1f} s'.format(end-start))
    return ret
def solve_with_pcm(g, eqs, forbidden, use_min_degree):
    """Solve the tearing problem with the partial cycle matrix (PCM) method:
    iteratively add violated cycle constraints until the relaxation's lower
    bound meets the feasible solution's upper bound.
    Returns (rowp, colp, matches, tear_set, sink_set).
    """
    eqs = set(eqs)
    # Invariant: match belongs to the best known feasible solution (dag), and
    # match is (hopefully) a near optimal solution to the relaxation
    dag, tears, ub, match, loops = \
        create_feasible_solution(g, eqs, forbidden, use_min_degree)
    log('UB <=', ub)
    # Build a shortest path loop around each allowed edge of the tear variables
    loops |= path_around_tears(dag, eqs, forbidden, tears)
    #
    candids, dag, match, ub = step(g, eqs, forbidden, loops, dag, match, ub)
    while candids:
        loops |= candids # Replace with a greedy heuristic?
        # Try to put a shortest path loop around each erroneously matched edge,
        # and try to improve the lower bound and/or the feasible solution (upper
        # bound).
        candids, dag, match, ub = step(g, eqs, forbidden, loops, dag, match, ub)
    #
    variables = sorted(n for n in dag if n not in eqs)
    # all tears are sources, no other variable is source
    sources = sorted(n for n,indeg in dag.in_degree_iter(variables) if indeg==0)
    rowp, colp, matches, tear_set, sink_set = \
        permute_to_hessenberg(g, eqs, forbidden, sources)
    assert ub==len(tear_set), (sorted(sources), sorted(tear_set))
    log()
    log('*** Optimal solution found ***')
    log('Number of tear variables: ', len(tear_set))
    log('Number of residual equations:', len(sink_set))
    return rowp, colp, matches, tear_set, sink_set
def create_feasible_solution(g, eqs, forbidden, use_min_degree):
    """Build an initial feasible solution, either with the minimum-degree
    ordering heuristic or from a small loop subset.
    Returns (dag, tears, ub, match, loops).
    """
    if use_min_degree:
        method_name = 'minimum-degree ordering'
    else:
        method_name = 'small loop subset selection'
    log('Feasible solution will be generated with', method_name)
    start = time()
    #
    if use_min_degree:
        # TODO Suboptimal: match recomputed; type of tears is either set or list
        rowp, colp, matches, tears, sinks = min_degree(g, eqs, forbidden)
        dag = matching_to_dag(g, eqs, forbidden, rowp, colp, matches, tears, sinks)
        ub = len(tears)
        loops = set()
    else:
        dag, tears, ub, loops = feasible_sol_from_loop_subset(g, eqs, forbidden)
    #
    end = time()
    log('Generating a feasible solution took: {0:0.2f} s'.format(end-start))
    match = get_matching(dag, eqs)
    return dag, tears, ub, match, loops
def feasible_sol_from_loop_subset(g, eqs, forbidden):
    """Seed the cycle matrix with an initial loop subset, solve the
    relaxation, then repair cycles heuristically to get a feasible solution.
    Returns (dag, tears, ub, loops).
    """
    loops = initial_subset(g, eqs, forbidden)
    match, lb = solve_relaxation(g, eqs, forbidden, loops)
    dig = orient_wrt_matching(g, eqs, match, lb)
    tears = mfes_heuristic(dig, eqs)
    ub = len(tears) + lb  # feasible, hence an upper bound
    digraph_to_dag(dig, eqs, tears, lb)
    return dig, tears, ub, loops
def get_matching(dag, eqs):
    """Recover the (equation, variable) matching from an oriented dag.
    An equation with an outgoing edge is matched to its single successor;
    sink equations are skipped.
    """
    # The heap-based min-degree has the matching, so it is somewhat wasteful...
    pairs = []
    for eq in sorted(eqs):
        successors = dag.succ[eq]
        if not successors:
            continue  # sink equation: not matched to any variable
        (var,) = successors  # exactly one out edge in a valid elimination
        pairs.append((eq, var))
    return pairs
def path_around_tears(dag, eqs, forbidden, tears):
    # Build a shortest path loop around each allowed edge of the tear variables.
    # A loop is a candidate iff *all* of its edges would go into an allowed
    # direction.
    # Returns a set of normalized loop tuples (see to_normalized_path).
    assert nx.is_directed_acyclic_graph(dag)
    loops = set()
    for var in tears:
        edges = [(eq,var) for eq in dag[var] if (eq,var) not in forbidden]
        for eq,var in edges:
            # Removing var -> eq is necessary in all cases
            with removed_edge(dag, var, eq):
                loops.update( loops_around_edge(dag, eqs, eq, var) )
    return loops
@contextmanager
def removed_edge(dag, u, v):
    """Temporarily remove the edge u -> v from dag; restore it on exit,
    even if the body raises.
    """
    dag.remove_edge(u,v)
    try:
        yield
    finally:
        dag.add_edge(u,v)
def loops_around_edge(dag, eqs, eq, var):
    """Collect candidate loops through the (eq, var) edge of a tear variable.
    Currently only the single shortest-path loop is generated; the
    commented-out code below is an alternative, more aggressive strategy.
    """
    assert not dag.has_edge(eq, var), 'var is supposed to be a tear'
    loops = set()
    # First, remove none of the neighbors of eq or var:
    add_a_loop(dag, eqs, eq, var, loops)
    # TODO Unclear which loop generation strategy is "the best". This one tries
    # to put more than one loop around the edges by systematically removing
    # edges incident to eq or var or both.
    #
    ## Only neighbor of eq is removed:
    #for eq_nbr in dag.predecessors(eq):
    #    with removed_edge(dag, eq_nbr, eq):
    #        add_a_loop(dag, eqs, eq, var, loops)
    ## Try removing neighbors of var or both var and eq
    #for var_nbr in dag.successors(var):
    #    with removed_edge(dag, var, var_nbr):
    #        # Only neighbor of var is removed:
    #        add_a_loop(dag, eqs, eq, var, loops)
    #        # Both neighbor of eq and var removed:
    #        for eq_nbr in dag.predecessors(eq):
    #            with removed_edge(dag, eq_nbr, eq):
    #                add_a_loop(dag, eqs, eq, var, loops)
    #
    return loops
def add_a_loop(dag, eqs, eq, var, loops):
    # create a small cycle around edge eq -> var with a shortest path var ~> eq
    # The loop is stored in normalized form so duplicates collapse in the set.
    try:
        simple_path = nx.shortest_path(dag,var,eq)
        loops.add( to_normalized_path(simple_path, eqs) )
    except nx.NetworkXNoPath:
        pass # That's OK: no loop exists through this edge
def to_normalized_path(simple_path, eqs):
    # Only for bipartite graphs. Rotate the equation with the smallest id into
    # the first position, then traverse the loop, starting with that neighbor
    # variable that has the smaller id. The result is a canonical tuple, so
    # two traversals of the same loop always normalize identically and are
    # deduplicated when stored in a set.
    #
    # The debug-only cross-check against rotate_min_to_first_pos (marked
    # "TODO Remove when done") has been removed: it sorted and rotated the
    # path on every call for no production benefit.
    equations = [ (n,i) for i,n in enumerate(simple_path) if n in eqs ]
    idx = min(equations)[1]
    left, right = simple_path[idx-1], simple_path[(idx+1) % len(simple_path)]
    step = -1 if left < right else 1
    path = simple_path[idx::step] + simple_path[:idx:step]
    return tuple(path)
def step(g, eqs, forbidden, loops, feas_dag, match, ub):
    # loops give a partial cycle matrix, match a feasible solution
    # Returns (candidate_loops, best_dag, best_match, ub); an empty candidate
    # set signals that the best known solution is proved optimal.
    relax_matching, lb = solve_relaxation(g, eqs, forbidden, loops, match)
    assert lb <= ub
    if lb == ub:
        log('The best feasible solution is proved to be optimal, LB = UB =', ub)
        return [ ], feas_dag, match, ub # feas_dag is now proved to be optimal
    #
    # Check whether the relax_matching gives an acyclic orientation of the
    # original undirected graph
    dig = orient_wrt_matching(g, eqs, relax_matching, lb)
    if nx.is_directed_acyclic_graph(dig):
        log('The relaxation became feasible, meaning UB = LB =', lb)
        return [ ], dig, relax_matching, lb
    #
    log('LB >= {} (UB <= {})'.format(lb, ub))
    # Neither lb==ub nor acyclic; adding the missed loops
    tears = mfes_heuristic(dig, eqs)
    assert tears
    digraph_to_dag(dig, eqs, tears, lb) # checks: new_ub == len(tears) + lb
    candidates = path_around_tears(dig, eqs, forbidden, tears)
    candidates -= loops
    assert candidates, 'We are stuck in an infinite loop'
    #plot_relax_solution(g, dig, eqs, forbidden, candidates)
    #
    # Has the UB improved?
    new_ub = len(tears) + lb # already double-checked by digraph_to_dag
    if new_ub < ub:
        log('Improved UB: {} -> {}'.format(ub, new_ub))
        feas_dag, match, ub = dig, get_matching(dig, eqs), new_ub
    return candidates, feas_dag, match, ub
def plot_relax_solution(g, dag, eqs, forbidden, candidates):
    # Also marks the missed loops (candidates) in red.
    # Debug helper: visualize the relaxation's ordering as a Hessenberg plot.
    tears, _, order = get_solution(dag, eqs, forbidden)
    rowp = [ r for r in order if r in eqs ]
    rindex = { name : i for i, name in enumerate(rowp) }
    #
    # Put the tear columns first, sorted by their first occurrence row.
    colp = sorted(tears, key=lambda c: min(rindex[r] for r in g[c]))
    seen = set(colp)
    colp.extend( c for c in order if c not in eqs and c not in seen )
    # Or, if we want the spiked form, uncomment the lines below:
    #from order_util import get_row_col_perm
    #rowp, colp = get_row_col_perm(eqs, dag, tears, sinks, order)
    #
    cindex = { name : i for i, name in enumerate(colp) }
    mark_red = [ ]
    for loop in candidates:
        for a, b in edges_of_cycle(loop):
            r, c = (a, b) if a in eqs else (b, a)
            mark_red.append( (rindex[r], cindex[c]) )
    #
    msg, partitions = '', [ ]
    plot_hessenberg(g, rowp, colp, partitions, msg, mark_red)
def solve_relaxation(g, eqs, forbidden, loops, match=None):
    """Solve the ILP relaxation over the partial cycle matrix |loops|.
    |match|, if given, warm-starts the solver. Returns (matches, lb) where
    lb is a lower bound on the number of tear variables.
    """
    log()
    log('The cycle matrix has', len(loops), 'rows')
    m, y = build_ilp(g, eqs, forbidden, loops, match)
    #dump(m, '.lp', increment=True)
    success = solve_ilp(m)
    #dump(m, '.sol')
    assert success, 'Solver failures are not handled'
    objective = int(round(m.getObjective().getValue()))
    n_vars, n_matched_vars = len(g)-len(eqs), objective
    lb = n_vars - n_matched_vars # unmatched vars are tear variables
    matches = \
        sorted(edge for edge, var in iteritems(y) if int(round(var.x))==1)
    #log('matches:', matches)
    return matches, lb
def dump(m, extension, increment=False):
    """Debug helper: write the Gurobi model to /tmp for offline inspection.
    A counter stored on the function itself numbers consecutive dumps.
    """
    m.update()
    if not hasattr(dump, 'counter'):  # emulate a C-style static variable
        dump.counter = 0
    if increment:
        dump.counter += 1
    m.write('/tmp/relaxation_%d%s' % (dump.counter, extension))
def orient_wrt_matching(g_orig, eqs, relax_matching, lb):
    """Orient the undirected bipartite graph according to the relaxation's
    matching: matched edges point eq -> var, all others var -> eq.
    The result may still contain cycles.
    """
    dig = nx.DiGraph()
    dig.add_nodes_from(g_orig)
    matched_edges = { eq_var for eq_var in relax_matching }
    for eq_var in g_orig.edges_iter(eqs):
        u, v = eq_var if eq_var in matched_edges else (eq_var[1], eq_var[0])
        dig.add_edge(u, v)
    variables = sorted(n for n in g_orig if n not in eqs)
    # all tears are sources, no other variable is source
    sources = sorted(n for n,indeg in dig.in_degree_iter(variables) if indeg==0)
    assert len(variables) == len(sources) + len(relax_matching)
    assert lb == len(sources)
    return dig
def matching_to_dag(g_orig, eqs, forbidden, rowp, colp, matches, tears, sinks):
    """Turn the min-degree result into an oriented DAG and sanity-check that
    the matching is consistent with the tear and sink sets.
    """
    matched_edges = set(edge for edge in iteritems(matches) if edge[0] in eqs)
    len_matches = len(matched_edges)
    assert not (matched_edges & forbidden)
    dag = nx.DiGraph()
    dag.add_nodes_from(rowp) # Empty (isolated) equations are allowed
    #dag.add_nodes_from(variables)
    for eq_var in g_orig.edges_iter(rowp):
        u, v = eq_var if eq_var in matched_edges else (eq_var[1], eq_var[0])
        dag.add_edge(u, v)
        matched_edges.discard(eq_var)
    assert not matched_edges  # every matched edge must occur in g_orig
    # FIXME Comparing str and int breaks on Py 3
    has_all_nodes = sorted(dag, key=str) == sorted(g_orig, key=str)
    assert has_all_nodes # Isolated (degree zero) var nodes?
    assert nx.is_directed_acyclic_graph(dag)
    # Check whether the matching is sane
    assert len_matches == len(eqs) - len(sinks)
    assert len_matches == len(g_orig) - len(eqs) - len(tears)
    more_than_one_outedge = [ eq for eq in rowp if len(dag.succ[eq]) > 1 ]
    assert not more_than_one_outedge, more_than_one_outedge
    more_than_one_inedge = [var for var in colp if len(dag.pred[var]) > 1]
    assert not more_than_one_inedge, more_than_one_inedge
    return dag
def mfes_heuristic(dig, eqs):
    """Run the minimum feedback edge set heuristic on the (cyclic) digraph
    and return the sorted list of tear variables it selects.
    """
    objective, elims = run_mfes_heuristic(dig, try_one_cut=True)
    ##--- TODO Hack here to run with grb_pcm instead of the heuristic
    #from grb_pcm import solve_problem as mfes_rigorous
    #g = nx.DiGraph()
    #for u, v in dig.edges_iter():
    #    g.add_edge(u, v, { 'weight' : 1, 'orig_edges' : [ (u,v) ] })
    #elims, objective = mfes_rigorous(g)
    ##---
    # Each eliminated edge contributes its variable endpoint as a tear.
    tears = sorted( v if v not in eqs else u for u,v in elims )
    assert all(v not in eqs for v in tears)
    assert objective == len(tears)
    log('It is still necessary to guess', objective, 'variables')
    if objective <= 5:
        log(tears)
    return tears
def digraph_to_dag(dig, eqs, tears, lb):
    """Flip every incoming edge of each tear variable in place, making all
    tears sources; double-checks that the implied upper bound is consistent.
    """
    for v in tears:
        eq_nbrs = list(dig.pred[v])  # copy: edges are mutated in the loop
        for e in eq_nbrs:
            dig.remove_edge(e, v)
            dig.add_edge(v, e)
    # all tears are sources, no other variable is source
    variables = sorted(n for n in dig if n not in eqs)
    sources = sorted(n for n,indeg in dig.in_degree_iter(variables) if indeg==0)
    # double-check the upper bound:
    assert len(sources)==len(tears)+lb
def get_solution(dag, eqs, forbidden):
    # Somewhat duplicate of get_final_order in min_degree
    # Returns (sources, sinks, order): tear variables, residual equations,
    # and a deterministic topological order of the dag.
    equations = sorted(eqs)
    variables = sorted(n for n in dag if n not in eqs)
    # all tears are sources, no other variable is source
    sources = sorted(n for n,indeg in dag.in_degree_iter(variables) if indeg==0)
    # all residuals are sinks, no other equation is sink
    sinks = sorted(n for n, out in dag.out_degree_iter(equations) if out==0)
    # do a topological sort;
    # nbunch tries to break ties by ordering the equations alphabetically.
    nbunch = list(reversed(equations))
    nbunch.extend(reversed(variables))
    order = deterministic_topological_sort(dag, nbunch)
    # Sanity check: each equation is matched with a single allowed variable
    resids = set(sinks)
    non_sink_eqs = (eq for eq in order if eq in eqs and eq not in resids)
    for eq in non_sink_eqs:
        # eq must have exactly one allowed out edge in a valid elimination
        (var,) = dag.succ[eq]
        assert (eq,var) not in forbidden, (eq,var)
    return sources, sinks, order
#-------------------------------------------------------------------------------
def build_ilp(g, eqs, forbidden, simple_cycles, match):
    """Build the Gurobi ILP of the maximum cycle-breaking matching.

    Returns (m, y) where ``m`` is the Gurobi model and ``y`` maps each
    (eq, var) edge of the bipartite graph ``g`` to a binary variable that is 1
    iff the edge is in the matching.  Forbidden edges get objective weight 0;
    ``match``, when given, seeds the solver with a feasible MIP start.
    """
    m = Model()
    m.setAttr('ModelSense', GRB.MAXIMIZE)
    edges = sorted(g.edges(eqs))
    # y: is edge e in the matching? The worth of each allowed match is 1.
    obj_coeff = { e : (1 if e not in forbidden else 0) for e in edges }
    y = { e : m.addVar(vtype=GRB.BINARY, obj=obj_coeff[e]) for e in edges }
    m.update()
    # An equation is matched at most once
    for eq in eqs:
        add_matched_at_most_once_con(m, y, g.edges(eq))
    # A variable is matched at most once
    set_eqs = set(eqs)
    vrs = sorted(v for v in g if v not in set_eqs)
    for v in vrs:
        # g.edges(v) yields (var, eq) pairs; swap them to match y's keys
        add_matched_at_most_once_con(m, y, list((eq,v) for v,eq in g.edges(v)))
    # Each known simple cycle must be broken by leaving enough edges unmatched
    break_each_cycle_at_least_once(eqs, m, y, simple_cycles)
    # Optional valid inequality from the trivial lower bound, if present
    set_lower_bound_if_any(g, eqs, m, y)
    # Set feasible solution from the matching, if any
    if match is not None:
        match = set(match)
        for edge, var in iteritems(y):
            var.setAttr('Start', 1 if edge in match else 0)
    return m, y
def add_matched_at_most_once_con(m, y, edges):
    """Add the constraint sum(y[e] for e in edges) <= 1 to model ``m``."""
    y_in_con = [ y[e] for e in edges ]
    lhs = LinExpr([1.0]*len(y_in_con), y_in_con)
    m.addConstr(lhs, GRB.LESS_EQUAL, 1.0)
def break_each_cycle_at_least_once(eqs, m, y, simple_cycles):
    """For each simple cycle, cap its matched edges at (half minus one).

    A cycle in the bipartite graph alternates equation and variable nodes, so
    it has an even number of edges; if half of them were matched the cycle
    would survive the elimination, hence the n/2 - 1 cap.
    """
    equations = set(eqs)
    for cyc in simple_cycles:
        y_in_con = get_all_y_in_cycle(cyc, y, equations)
        n_ys = len(y_in_con)
        # bipartite cycles always have an even edge count
        assert n_ys % 2 == 0, n_ys
        matched_max = n_ys//2 - 1
        lhs = LinExpr([1.0]*n_ys, y_in_con)
        m.addConstr(lhs, GRB.LESS_EQUAL, matched_max)
def get_all_y_in_cycle(cyc, y, equations):
    """Collect the matching variable of every edge along the cycle *cyc*.

    The keys of *y* are (equation, variable) pairs, so each edge taken from
    the cycle is oriented equation-first before the lookup.
    """
    return [y[(a, b) if a in equations else (b, a)]
            for a, b in edges_of_cycle(cyc)]
def set_lower_bound_if_any(g, eqs, m, y):
    """Add a valid inequality from the graph's 'trivial_lb' attribute, if set.

    ``trivial_lb`` is a lower bound on the number of unmatched variables, so
    at most n_vars - lb variables can ever be matched; adding this cap helps
    the MIP solver prune.  No-op when the attribute is absent.
    """
    lb = g.graph.get('trivial_lb')
    if lb is None:
        return
    lhs = LinExpr([1]*len(y), list(itervalues(y)))
    # See solve_relaxation: objective is to maximize the sum of matched
    # variables, the lb is on the unmatched variables: n_vars - lb is an upper
    # bound on the variables that can be matched at most
    n_vars = len(g)-len(eqs)
    m.addConstr(lhs, GRB.LESS_EQUAL, n_vars - lb)
#-------------------------------------------------------------------------------
# TODO Cleanup this code if it turns out that we actually need it. As it stands,
# it is inferior to the min-degree ordering.
def initial_subset(g, eqs, forbidden):
    """Build small loops around each allowed edge and return a selected subset.

    Pipeline: shortest-path loops (path repr) -> edge repr -> ILP-based subset
    selection with pairwise edge sharing (max_coverage=2) -> back to path repr.
    """
    initial_loops = build_small_loops(g, eqs, forbidden)
    # Remove those small loops that have forbidden edges anyway? Although
    # it is very likely that the presolve in Gurobi will throw them away anyway
    # so maybe it would be wasted developer time, not sure...
    loops = loops_from_path_to_edge_repr(initial_loops, eqs)
    log('Initially we have:', len(loops), 'loops')
    # Subset selection with edge representation
    start = time()
    subset = select_subset_of_loops(g, eqs, loops, max_coverage=2)
    end = time()
    log('Number of selected loops:', len(subset))
    log('Selection computed in {0:0.1f} s'.format(end-start))
    # Convert the subset back to simple path representation
    return convert_loops_from_edge_to_path_repr(subset)
def build_small_loops(g, eqs, forbidden):
    """Return the set of shortest loops through each allowed (eq, var) edge.

    For every allowed edge the edge is temporarily removed, the shortest
    var -> eq path (which closes a loop with the removed edge) is recorded in
    normalized form, and the edge is restored.  ``g`` is left unchanged.
    """
    # Build small loops around each edge
    start = time()
    initial_loops = set()
    for eq,var in g.edges_iter(eqs):
        if (eq,var) in forbidden:
            continue
        # drop the edge so the shortest path cannot use it directly
        g.remove_edge(eq,var)
        try:
            simple_path = nx.shortest_path(g,var,eq)
            initial_loops.add( to_normalized_path(simple_path, eqs) )
        except nx.NetworkXNoPath:
            pass # That's OK
        # restore the graph before the next iteration
        g.add_edge(eq,var)
    end = time()
    log('Small loops computed in {0:0.1f} s'.format(end-start))
    return initial_loops
def loops_from_path_to_edge_repr(initial_loops, eqs):
    """Re-encode each loop from node-path form into (equation, variable) edges.

    The generic utils helper is not suitable here because the graph is
    bipartite: every edge must be oriented equation-first.
    """
    start = time()
    edge_loops = []
    for path in initial_loops:
        edge_loops.append(tuple((a, b) if a in eqs else (b, a)
                                for a, b in edges_of_cycle(path)))
    end = time()
    log('Edge representation computed in {0:0.1f} s'.format(end-start))
    return edge_loops
def convert_loops_from_edge_to_path_repr(loops_edge_repr):
    """Convert loops from (eq, var) edge tuples back to simple path tuples.

    Every second edge of a loop already carries the path nodes in order, so
    flattening loop[::2] reconstructs the simple path.  The input came from
    build_small_loops, which already normalized each path.
    """
    loops_path_repr = set()
    for loop in loops_edge_repr:
        path = tuple(node for edge in loop[::2] for node in edge)
        loops_path_repr.add(path)
    return loops_path_repr
# TODO Mostly a duplicate of select_subset_of_loops in grb_pcm but adjusted to
# bipartite graphs
def select_subset_of_loops(g, eqs, small_loops, max_coverage=1):
    # Select a maximum set of loops such that each edge participates in at most
    # max_coverage loops. The max_coverage=1 means the loops are
    # independent, they do not share edges. The max_coverage=2 means the
    # selected loops share at most 1 edge, in other words, loops share edges at
    # most pairwise.
    m = Model()
    m.setAttr('ModelSense', GRB.MAXIMIZE)
    # * Bipartite, apart from that, duplicate of grb_pcm select_subset_of_loops*
    edges = g.edges(eqs)
    # one binary per loop (objective weight 1) and one binary per edge
    loop_vars = {loop:m.addVar(vtype=GRB.BINARY,obj=1) for loop in small_loops}
    edge_vars = { edge : m.addVar(vtype=GRB.BINARY) for edge in edges }
    m.update()
    # An edge can participate in at most max_coverage loops
    # TODO Takes very long to build the MILP; most likely e in loop which
    # searches in linear time in the loop tuple.
    start = time()
    for e in edges:
        in_loops = [var for loop,var in iteritems(loop_vars) if e in loop]
        if in_loops:
            lhs = LinExpr([1]*len(in_loops), in_loops)
            m.addConstr(lhs, GRB.LESS_EQUAL, max_coverage)
    end = time()
    log('Building the ILP model took {0:0.1f} s'.format(end-start))
    # If a loop is chosen, all of its edges are chosen
    for cycle in small_loops:
        for edge in cycle:
            m.addConstr(edge_vars[edge], GRB.GREATER_EQUAL, loop_vars[cycle])
    success = solve_ilp(m)
    assert success, 'Solver failures are not handled'
    # round the relaxed objective/variable values back to integers
    objective = int(round(m.getObjective().getValue()))
    loop_subset = {l for l,var in iteritems(loop_vars) if int(round(var.x))==1}
    assert len(loop_subset)==objective
    #log()
    #log('Number of rows in the cycle matrix:', objective)
    return loop_subset
#-------------------------------------------------------------------------------
# Run the command-line driver (defined earlier in this module) when the
# module is executed directly.
if __name__ == '__main__':
    main()
|
nozuono/calibre-webserver | refs/heads/master | src/html5lib/treebuilders/etree_lxml.py | 1724 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
import warnings
import re
import sys
from . import _base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import ihatexml
import lxml.etree as etree
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
comment_type = etree.Comment("asd").tag
class DocumentType(object):
    """Plain container holding a doctype's name, public id and system id.

    Used instead of an lxml node because lxml cannot mutate the doctype of an
    already-parsed tree; TreeBuilder serializes this object into the document
    string it hands to the parser.
    """

    def __init__(self, name, publicId, systemId):
        # stored verbatim, exactly as received from the doctype token
        self.name, self.publicId, self.systemId = name, publicId, systemId
class Document(object):
    """Wrapper around an lxml ElementTree plus a parallel child-node list."""

    def __init__(self):
        # _elementTree is filled in later by TreeBuilder.insertRoot
        self._elementTree = None
        self._childNodes = []

    def appendChild(self, element):
        # siblings of the root element are attached via lxml's addnext
        root = self._elementTree.getroot()
        root.addnext(element._element)

    @property
    def childNodes(self):
        return self._childNodes
def testSerializer(element):
    """Serialize *element* into html5lib's indented test format.

    Accepts a full lxml ElementTree (document case), a plain string/bytes
    (text inside a fragment), a fragment list, or an lxml element, and emits
    one '|'-prefixed line per node.
    """
    rv = []
    # finalText is never assigned here; the trailing block is therefore dead,
    # kept for symmetry with the other treebuilders' serializers.
    finalText = None
    infosetFilter = ihatexml.InfosetFilter()

    def serializeElement(element, indent=0):
        if not hasattr(element, "tag"):
            if hasattr(element, "getroot"):
                # Full tree case
                rv.append("#document")
                if element.docinfo.internalDTD:
                    # emit a bare doctype when neither id is present,
                    # otherwise include both the public and system id
                    if not (element.docinfo.public_id or
                            element.docinfo.system_url):
                        dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                    else:
                        dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
                            element.docinfo.root_name,
                            element.docinfo.public_id,
                            element.docinfo.system_url)
                    rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
                # walk back to the first sibling of the root (comments may
                # precede the root element), then serialize left to right
                next_element = element.getroot()
                while next_element.getprevious() is not None:
                    next_element = next_element.getprevious()
                while next_element is not None:
                    serializeElement(next_element, indent + 2)
                    next_element = next_element.getnext()
            elif isinstance(element, str) or isinstance(element, bytes):
                # Text in a fragment
                assert isinstance(element, str) or sys.version_info.major == 2
                rv.append("|%s\"%s\"" % (' ' * indent, element))
            else:
                # Fragment case
                rv.append("#document-fragment")
                for next_element in element:
                    serializeElement(next_element, indent + 2)
        elif element.tag == comment_type:
            rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
            if hasattr(element, "tail") and element.tail:
                rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
        else:
            assert isinstance(element, etree._Element)
            # namespaced tags are printed as "<prefix name>"
            nsmatch = etree_builders.tag_regexp.match(element.tag)
            if nsmatch is not None:
                ns = nsmatch.group(1)
                tag = nsmatch.group(2)
                prefix = constants.prefixes[ns]
                rv.append("|%s<%s %s>" % (' ' * indent, prefix,
                                          infosetFilter.fromXmlName(tag)))
            else:
                rv.append("|%s<%s>" % (' ' * indent,
                                       infosetFilter.fromXmlName(element.tag)))

            if hasattr(element, "attrib"):
                # collect (display-name, value) pairs, then emit them sorted
                attributes = []
                for name, value in element.attrib.items():
                    nsmatch = tag_regexp.match(name)
                    if nsmatch is not None:
                        ns, name = nsmatch.groups()
                        name = infosetFilter.fromXmlName(name)
                        prefix = constants.prefixes[ns]
                        attr_string = "%s %s" % (prefix, name)
                    else:
                        attr_string = infosetFilter.fromXmlName(name)
                    attributes.append((attr_string, value))

                for name, value in sorted(attributes):
                    rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))

            if element.text:
                rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
            indent += 2
            for child in element:
                serializeElement(child, indent)
        # tail text is printed at the parent's indentation level
        if hasattr(element, "tail") and element.tail:
            rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
    serializeElement(element, 0)

    if finalText is not None:
        rv.append("|%s\"%s\"" % (' ' * 2, finalText))

    return "\n".join(rv)
def tostring(element):
    """Serialize an element and its child nodes to a string.

    *element* may be an lxml ElementTree (the doctype is emitted first) or a
    single element/comment node.  Attribute values and text are emitted
    unescaped, matching the original behaviour of this helper.
    """
    rv = []
    # finalText is never assigned anywhere in this function; the trailing
    # block is kept for symmetry with testSerializer above.
    finalText = None

    def serializeElement(element):
        if not hasattr(element, "tag"):
            # ElementTree case: emit the internal DTD (if any), then the root
            if element.docinfo.internalDTD:
                if element.docinfo.doctype:
                    dtd_str = element.docinfo.doctype
                else:
                    dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                rv.append(dtd_str)
            serializeElement(element.getroot())
        elif element.tag == comment_type:
            rv.append("<!--%s-->" % (element.text,))
        else:
            # This is assumed to be an ordinary element
            if not element.attrib:
                rv.append("<%s>" % (element.tag,))
            else:
                attr = " ".join(["%s=\"%s\"" % (name, value)
                                 for name, value in element.attrib.items()])
                rv.append("<%s %s>" % (element.tag, attr))
            if element.text:
                rv.append(element.text)

            for child in element:
                serializeElement(child)

            rv.append("</%s>" % (element.tag,))

        if hasattr(element, "tail") and element.tail:
            rv.append(element.tail)

    serializeElement(element)

    if finalText is not None:
        # Bug fix: this used to be "%s\"" % (' ' * 2, finalText) -- a single
        # placeholder fed a two-element tuple, which raises TypeError if the
        # branch is ever reached.  Mirror testSerializer's quoting instead.
        rv.append("%s\"%s\"" % (' ' * 2, finalText))

    return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
    """html5lib tree builder backed by lxml.etree.

    Because libxml2 cannot alter the doctype of a parsed tree, the doctype is
    serialized into a string and parsed together with a placeholder root
    element (see insertRoot); data that lxml cannot represent triggers a
    DataLossWarning instead of an error.
    """
    documentClass = Document
    doctypeClass = DocumentType
    elementClass = None      # set per-instance in __init__
    commentClass = None      # set per-instance in __init__
    fragmentClass = Document
    implementation = etree

    def __init__(self, namespaceHTMLElements, fullTree=False):
        builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
        infosetFilter = self.infosetFilter = ihatexml.InfosetFilter()
        self.namespaceHTMLElements = namespaceHTMLElements

        class Attributes(dict):
            """dict of attributes that mirrors every write into the
            underlying lxml element, coercing names through infosetFilter."""
            def __init__(self, element, value={}):
                self._element = element
                dict.__init__(self, value)
                for key, value in self.items():
                    if isinstance(key, tuple):
                        # namespaced attribute: key is a (prefix, name, ns) tuple
                        name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
                    else:
                        name = infosetFilter.coerceAttribute(key)
                    self._element._element.attrib[name] = value

            def __setitem__(self, key, value):
                dict.__setitem__(self, key, value)
                if isinstance(key, tuple):
                    name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
                else:
                    name = infosetFilter.coerceAttribute(key)
                self._element._element.attrib[name] = value

        class Element(builder.Element):
            """etree Element subclass that coerces names/text so they are
            always representable in lxml."""
            def __init__(self, name, namespace):
                name = infosetFilter.coerceElement(name)
                builder.Element.__init__(self, name, namespace=namespace)
                self._attributes = Attributes(self)

            def _setName(self, name):
                self._name = infosetFilter.coerceElement(name)
                self._element.tag = self._getETreeTag(
                    self._name, self._namespace)

            def _getName(self):
                # report the original (un-coerced) name to callers
                return infosetFilter.fromXmlName(self._name)

            name = property(_getName, _setName)

            def _getAttributes(self):
                return self._attributes

            def _setAttributes(self, attributes):
                self._attributes = Attributes(self, attributes)

            attributes = property(_getAttributes, _setAttributes)

            def insertText(self, data, insertBefore=None):
                data = infosetFilter.coerceCharacters(data)
                builder.Element.insertText(self, data, insertBefore)

            def appendChild(self, child):
                builder.Element.appendChild(self, child)

        class Comment(builder.Comment):
            """Comment subclass that coerces comment data for lxml."""
            def __init__(self, data):
                data = infosetFilter.coerceComment(data)
                builder.Comment.__init__(self, data)

            def _setData(self, data):
                data = infosetFilter.coerceComment(data)
                self._element.text = data

            def _getData(self):
                return self._element.text

            data = property(_getData, _setData)

        self.elementClass = Element
        # NOTE(review): the locally defined Comment class above is unused --
        # builder.Comment is installed instead, so comment data does NOT go
        # through infosetFilter.coerceComment.  Confirm whether this is
        # intentional before changing it.
        self.commentClass = builder.Comment
        # self.fragmentClass = builder.DocumentFragment
        _base.TreeBuilder.__init__(self, namespaceHTMLElements)

    def reset(self):
        """Reset parser state; comments seen before the root are buffered."""
        _base.TreeBuilder.reset(self)
        self.insertComment = self.insertCommentInitial
        self.initial_comments = []
        self.doctype = None

    def testSerializer(self, element):
        return testSerializer(element)

    def getDocument(self):
        # fullTree is the module-level flag: full ElementTree vs. root element
        if fullTree:
            return self.document._elementTree
        else:
            return self.document._elementTree.getroot()

    def getFragment(self):
        """Return the fragment as a list of text and element nodes."""
        fragment = []
        element = self.openElements[0]._element
        if element.text:
            fragment.append(element.text)
        fragment.extend(list(element))
        if element.tail:
            fragment.append(element.tail)
        return fragment

    def insertDoctype(self, token):
        """Record the doctype token; lxml limitations are warned about."""
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        if not name:
            warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
            self.doctype = None
        else:
            coercedName = self.infosetFilter.coerceElement(name)
            if coercedName != name:
                warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)

            doctype = self.doctypeClass(coercedName, publicId, systemId)
            self.doctype = doctype

    def insertCommentInitial(self, data, parent=None):
        # before the root exists, comments are buffered and emitted by insertRoot
        self.initial_comments.append(data)

    def insertCommentMain(self, data, parent=None):
        if (parent == self.document and
            self.document._elementTree.getroot()[-1].tag == comment_type):
            warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
        super(TreeBuilder, self).insertComment(data, parent)

    def insertRoot(self, token):
        """Create the document root"""
        # Because of the way libxml2 works, it doesn't seem to be possible to
        # alter information like the doctype after the tree has been parsed.
        # Therefore we need to use the built-in parser to create our initial
        # tree, after which we can add elements like normal
        docStr = ""
        if self.doctype:
            assert self.doctype.name
            docStr += "<!DOCTYPE %s" % self.doctype.name
            if (self.doctype.publicId is not None or
                self.doctype.systemId is not None):
                docStr += (' PUBLIC "%s" ' %
                           (self.infosetFilter.coercePubid(self.doctype.publicId or "")))
                if self.doctype.systemId:
                    sysid = self.doctype.systemId
                    # pick a quote style the system id does not contain
                    if sysid.find("'") >= 0 and sysid.find('"') >= 0:
                        warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
                        sysid = sysid.replace("'", 'U00027')
                    if sysid.find("'") >= 0:
                        docStr += '"%s"' % sysid
                    else:
                        docStr += "'%s'" % sysid
                else:
                    docStr += "''"
            docStr += ">"
            if self.doctype.name != token["name"]:
                warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
        # placeholder root; renamed to the real tag a few lines below
        docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
        root = etree.fromstring(docStr)

        # Append the initial comments:
        for comment_token in self.initial_comments:
            root.addprevious(etree.Comment(comment_token["data"]))

        # Create the root document and add the ElementTree to it
        self.document = self.documentClass()
        self.document._elementTree = root.getroottree()

        # Give the root element the right name
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        if namespace is None:
            etree_tag = name
        else:
            etree_tag = "{%s}%s" % (namespace, name)
        root.tag = etree_tag

        # Add the root element to the internal child/open data structures
        root_element = self.elementClass(name, namespace)
        root_element._element = root
        self.document._childNodes.append(root_element)
        self.openElements.append(root_element)

        # Reset to the default insert comment function
        self.insertComment = self.insertCommentMain
|
vmware/pyvmomi-tools | refs/heads/master | pyvmomi_tools/cli/cursor.py | 4 | # Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "VMware, Inc."
import sys
def _create_char_spinner():
    """Creates a generator yielding a char based spinner.

    Cycles forever through the four ASCII spinner characters.
    """
    while True:
        for c in '|/-\\':
            yield c


# Module-level spinner state shared by all spinner() calls.
_spinner = _create_char_spinner()


def spinner(label=''):
    """Prints label with a spinner.

    When called repeatedly from inside a loop this prints
    a one line CLI spinner.
    """
    # Bug fix: generator.next() was removed in Python 3; the builtin next()
    # works on both Python 2 and Python 3.
    sys.stdout.write("\r\t%s %s" % (label, next(_spinner)))
    sys.stdout.flush()
|
andrius-preimantas/odoo | refs/heads/master | addons/hr_timesheet_invoice/report/report_analytic.py | 299 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
from openerp.addons.decimal_precision import decimal_precision as dp
class report_analytic_account_close(osv.osv):
    """Read-only reporting model backed by a SQL view (``_auto = False``).

    Lists analytic accounts that are candidates for closing: accounts whose
    booked quantity reached quantity_max, or whose end date has passed.
    """
    _name = "report.analytic.account.close"
    _description = "Analytic account to close"
    _auto = False    # table is not created by the ORM; init() builds a view
    _columns = {
        'name': fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
        'state': fields.char('Status', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'quantity': fields.float('Quantity', readonly=True),
        'quantity_max': fields.float('Max. Quantity', readonly=True),
        'balance': fields.float('Balance', readonly=True),
        'date_deadline': fields.date('Deadline', readonly=True),
    }

    def init(self, cr):
        # (Re)create the backing SQL view each time the module is updated.
        tools.drop_view_if_exists(cr, 'report_analytic_account_close')
        cr.execute("""
            create or replace view report_analytic_account_close as (
                select
                    a.id as id,
                    a.id as name,
                    a.state as state,
                    sum(l.unit_amount) as quantity,
                    sum(l.amount) as balance,
                    a.partner_id as partner_id,
                    a.quantity_max as quantity_max,
                    a.date as date_deadline
                from
                    account_analytic_line l
                right join
                    account_analytic_account a on (l.account_id=a.id)
                group by
                    a.id,a.state, a.quantity_max,a.date,a.partner_id
                having
                    (a.quantity_max>0 and (sum(l.unit_amount)>=a.quantity_max)) or
                    a.date <= current_date
            )""")
class report_account_analytic_line_to_invoice(osv.osv):
    """Read-only reporting model backed by a SQL view (``_auto = False``).

    Aggregates analytic lines that are still to be invoiced (invoice_id is
    NULL and to_invoice is set), grouped per year/month, product and account.
    """
    _name = "report.account.analytic.line.to.invoice"
    _description = "Analytic lines to invoice report"
    _auto = False    # table is not created by the ORM; init() builds a view
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'product_id':fields.many2one('product.product', 'Product', readonly=True),
        'account_id':fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
        'product_uom_id':fields.many2one('product.uom', 'Unit of Measure', readonly=True),
        'unit_amount': fields.float('Units', readonly=True),
        'sale_price': fields.float('Sale price', readonly=True, digits_compute=dp.get_precision('Product Price')),
        'amount': fields.float('Amount', readonly=True, digits_compute=dp.get_precision('Account')),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
          ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc, product_id asc, account_id asc'

    def init(self, cr):
        # (Re)create the backing SQL view each time the module is updated.
        tools.drop_view_if_exists(cr, 'report_account_analytic_line_to_invoice')
        cr.execute("""
            CREATE OR REPLACE VIEW report_account_analytic_line_to_invoice AS (
                SELECT
                    DISTINCT(to_char(l.date,'MM')) as month,
                    to_char(l.date, 'YYYY') as name,
                    MIN(l.id) AS id,
                    l.product_id,
                    l.account_id,
                    SUM(l.amount) AS amount,
                    SUM(l.unit_amount*t.list_price) AS sale_price,
                    SUM(l.unit_amount) AS unit_amount,
                    l.product_uom_id
                FROM
                    account_analytic_line l
                left join
                    product_product p on (l.product_id=p.id)
                left join
                    product_template t on (p.product_tmpl_id=t.id)
                WHERE
                    (invoice_id IS NULL) and (to_invoice IS NOT NULL)
                GROUP BY
                    to_char(l.date, 'YYYY'), to_char(l.date,'MM'), product_id, product_uom_id, account_id
            )
            """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
uclouvain/osis | refs/heads/dev | base/migrations/0028_auto_20160412_1219.py | 3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-12 10:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9) schema migration: redefines the field
    # definitions (choices labels and related options) on several CharFields.
    # Do not edit the field definitions by hand; regenerate instead.

    dependencies = [
        ('base', '0027_auto_20160406_1706'),
    ]

    operations = [
        migrations.AlterField(
            model_name='examenrollment',
            name='encoding_status',
            field=models.CharField(blank=True, choices=[('SAVED', 'saved'), ('SUBMITTED', 'submitted')], max_length=9, null=True),
        ),
        migrations.AlterField(
            model_name='examenrollment',
            name='justification_draft',
            field=models.CharField(blank=True, choices=[('ABSENT', 'absent'), ('CHEATING', 'cheating'), ('ILL', 'ill'), ('JUSTIFIED_ABSENCE', 'justified_absence'), ('SCORE_MISSING', 'score_missing')], max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='examenrollment',
            name='justification_final',
            field=models.CharField(blank=True, choices=[('ABSENT', 'absent'), ('CHEATING', 'cheating'), ('ILL', 'ill'), ('JUSTIFIED_ABSENCE', 'justified_absence'), ('SCORE_MISSING', 'score_missing')], max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='examenrollment',
            name='justification_reencoded',
            field=models.CharField(blank=True, choices=[('ABSENT', 'absent'), ('CHEATING', 'cheating'), ('ILL', 'ill'), ('JUSTIFIED_ABSENCE', 'justified_absence'), ('SCORE_MISSING', 'score_missing')], max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='examenrollmenthistory',
            name='justification_final',
            field=models.CharField(choices=[('ABSENT', 'absent'), ('CHEATING', 'cheating'), ('ILL', 'ill'), ('JUSTIFIED_ABSENCE', 'justified_absence'), ('SCORE_MISSING', 'score_missing')], max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='messagetemplate',
            name='format',
            field=models.CharField(choices=[('PLAIN', 'plain'), ('HTML', 'HTML'), ('PLAIN_HTML', 'plain_and_html')], max_length=15),
        ),
        migrations.AlterField(
            model_name='offeryear',
            name='grade',
            field=models.CharField(blank=True, choices=[('BACHELOR', 'bachelor'), ('MASTER', 'master'), ('DOCTORATE', 'ph_d')], max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='gender',
            field=models.CharField(blank=True, choices=[('F', 'female'), ('M', 'male'), ('U', 'unknown')], default='U', max_length=1, null=True),
        ),
        migrations.AlterField(
            model_name='sessionexam',
            name='status',
            field=models.CharField(choices=[('IDLE', 'idle'), ('OPEN', 'open'), ('CLOSED', 'closed')], max_length=10),
        ),
    ]
|
marqueedev/django | refs/heads/master | tests/template_tests/filter_tests/test_time.py | 326 | from datetime import time
from django.template.defaultfilters import time as time_filter
from django.test import SimpleTestCase
from django.utils import timezone
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeTests(TimezoneTestCase):
    """
    #20693: Timezone support for the time template filter
    """

    @setup({'time01': '{{ dt|time:"e:O:T:Z" }}'})
    def test_time01(self):
        # aware datetime: timezone name/offset specifiers all render
        output = self.engine.render_to_string('time01', {'dt': self.now_tz_i})
        self.assertEqual(output, '+0315:+0315:+0315:11700')

    @setup({'time02': '{{ dt|time:"e:T" }}'})
    def test_time02(self):
        # naive datetime: 'e' renders empty, 'T' falls back to the current tz
        output = self.engine.render_to_string('time02', {'dt': self.now})
        self.assertEqual(output, ':' + self.now_tz.tzinfo.tzname(self.now_tz))

    @setup({'time03': '{{ t|time:"P:e:O:T:Z" }}'})
    def test_time03(self):
        # time objects ignore tzinfo: all timezone specifiers render empty
        output = self.engine.render_to_string('time03', {'t': time(4, 0, tzinfo=timezone.get_fixed_timezone(30))})
        self.assertEqual(output, '4 a.m.::::')

    @setup({'time04': '{{ t|time:"P:e:O:T:Z" }}'})
    def test_time04(self):
        # naive time object: same output as the aware one above
        output = self.engine.render_to_string('time04', {'t': time(4, 0)})
        self.assertEqual(output, '4 a.m.::::')

    @setup({'time05': '{{ d|time:"P:e:O:T:Z" }}'})
    def test_time05(self):
        # a date is not a valid input for the time filter: empty output
        output = self.engine.render_to_string('time05', {'d': self.today})
        self.assertEqual(output, '')

    @setup({'time06': '{{ obj|time:"P:e:O:T:Z" }}'})
    def test_time06(self):
        # non-datetime values render as the empty string
        output = self.engine.render_to_string('time06', {'obj': 'non-datetime-value'})
        self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the time filter function (not via templates)."""

    def test_inputs(self):
        # 'h' is the 12-hour format: 13:00 -> '01', midnight -> '12'
        self.assertEqual(time_filter(time(13), 'h'), '01')
        self.assertEqual(time_filter(time(0), 'h'), '12')
|
smajda/django-nopassword | refs/heads/master | docs/conf.py | 3 | # -*- coding: utf-8 -*-
#
# django_nopassword documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 27 20:16:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for this documentation build.
extensions = [
    'sphinx.ext.doctest',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django_nopassword'
copyright = u'2014, Rolf Erik Lekang'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to the source directory, that match files and
# directories to ignore when looking for source files (e.g. build output).
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use for code samples.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages (stock Sphinx theme here).
# See the Sphinx documentation for a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for the HTML help builder.
htmlhelp_basename = 'django_nopassworddoc'
# -- Options for LaTeX output ---------------------------------------------
# LaTeX builder options; all keys are left at their Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples:
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'django_nopassword.tex', u'django\\_nopassword Documentation',
     u'Rolf Erik Lekang', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django_nopassword', u'django_nopassword Documentation',
     [u'Rolf Erik Lekang'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples:
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    ('index', 'django_nopassword', u'django_nopassword Documentation',
     u'Rolf Erik Lekang', 'django_nopassword', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
hethapu/python-twitter | refs/heads/master | setup.py | 17 | #!/usr/bin/env python
#
# Copyright 2007-2014 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The setup and build script for the python-twitter library.'''
import os
from setuptools import setup, find_packages
def read(*paths):
    """Join *paths* into a single file path and return that file's text."""
    full_path = os.path.join(*paths)
    with open(full_path, 'r') as handle:
        return handle.read()
# Package metadata; long_description is assembled from the repository's
# README, AUTHORS and CHANGES files at build time via read() above.
setup(
    name='python-twitter',
    version='2.3',
    author='The Python-Twitter Developers',
    author_email='python-twitter@googlegroups.com',
    license='Apache License 2.0',
    url='https://github.com/bear/python-twitter',
    keywords='twitter api',
    description='A Python wrapper around the Twitter API',
    long_description=(read('README.rst') + '\n\n' +
                      read('AUTHORS.rst') + '\n\n' +
                      read('CHANGES')),
    packages=find_packages(exclude=['tests*']),
    install_requires=['requests', 'requests-oauthlib'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Communications :: Chat',
        'Topic :: Internet',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
)
|
lokirius/python-for-android | refs/heads/master | python-build/python-libs/gdata/build/lib/gdata/base/service.py | 166 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GBaseService extends the GDataService to streamline Google Base operations.
GBaseService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import urllib
import gdata
import atom.service
import gdata.service
import gdata.base
import atom
# URL to which all batch requests are sent.
BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch'
class Error(Exception):
pass
class RequestError(Error):
pass
class GBaseService(gdata.service.GDataService):
  """Client for the Google Base service."""
  def __init__(self, email=None, password=None, source=None,
               server='base.google.com', api_key=None, additional_headers=None,
               handler=None, **kwargs):
    """Creates a client for the Google Base service.
    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'base.google.com'.
      api_key: string (optional) The Google Base API key to use.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='gbase', source=source,
        server=server, additional_headers=additional_headers, handler=handler,
        **kwargs)
    # Assigning through the property below also installs the
    # 'X-Google-Key' HTTP header used on every request.
    self.api_key = api_key
  def _SetAPIKey(self, api_key):
    if not isinstance(self.additional_headers, dict):
      self.additional_headers = {}
    self.additional_headers['X-Google-Key'] = api_key
  def __SetAPIKey(self, api_key):
    # Name-mangled wrapper bound to the api_key property; subclasses can
    # override _SetAPIKey without re-declaring the property.
    self._SetAPIKey(api_key)
  def _GetAPIKey(self):
    if 'X-Google-Key' not in self.additional_headers:
      return None
    else:
      return self.additional_headers['X-Google-Key']
  def __GetAPIKey(self):
    # Name-mangled wrapper bound to the api_key property.
    return self._GetAPIKey()
  api_key = property(__GetAPIKey, __SetAPIKey,
      doc="""Get or set the API key to be included in all requests.""")
  def Query(self, uri, converter=None):
    """Performs a style query and returns a resulting feed or entry.
    Args:
      uri: string The full URI which be queried. Examples include
          '/base/feeds/snippets?bq=digital+camera',
          'http://www.google.com/base/feeds/snippets?bq=digital+camera'
          '/base/feeds/items'
          I recommend creating a URI using a query class.
      converter: func (optional) A function which will be executed on the
          server's response. Examples include GBaseItemFromString, etc.
    Returns:
      If converter was specified, returns the results of calling converter on
      the server's response. If converter was not specified, and the result
      was an Atom Entry, returns a GBaseItem, by default, the method returns
      the result of calling gdata.service's Get method.
    """
    result = self.Get(uri, converter=converter)
    if converter:
      return result
    elif isinstance(result, atom.Entry):
      # No converter supplied: upgrade a plain Atom entry to a GBaseItem.
      return gdata.base.GBaseItemFromString(result.ToString())
    return result
  # The following Query*/Get* methods are typed convenience wrappers around
  # Get(), each fixing the converter for one Google Base feed or entry kind.
  def QuerySnippetsFeed(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseSnippetFeedFromString)
  def QueryItemsFeed(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseItemFeedFromString)
  def QueryAttributesFeed(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseAttributesFeedFromString)
  def QueryItemTypesFeed(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseItemTypesFeedFromString)
  def QueryLocalesFeed(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseLocalesFeedFromString)
  def GetItem(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseItemFromString)
  def GetSnippet(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseSnippetFromString)
  def GetAttribute(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseAttributeEntryFromString)
  def GetItemType(self, uri):
    return self.Get(uri, converter=gdata.base.GBaseItemTypeEntryFromString)
  def GetLocale(self, uri):
    return self.Get(uri, converter=gdata.base.GDataEntryFromString)
  def InsertItem(self, new_item, url_params=None, escape_params=True,
      converter=None):
    """Adds an item to Google Base.
    Args:
      new_item: atom.Entry or subclass A new item which is to be added to
          Google Base.
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.
      converter: func (optional) Function which is executed on the server's
          response before it is returned. Usually this is a function like
          GBaseItemFromString which will parse the response and turn it into
          an object.
    Returns:
      If converter is defined, the results of running converter on the server's
      response. Otherwise, it will be a GBaseItem.
    """
    response = self.Post(new_item, '/base/feeds/items', url_params=url_params,
                         escape_params=escape_params, converter=converter)
    if not converter and isinstance(response, atom.Entry):
      return gdata.base.GBaseItemFromString(response.ToString())
    return response
  def DeleteItem(self, item_id, url_params=None, escape_params=True):
    """Removes an item with the specified ID from Google Base.
    Args:
      item_id: string The ID of the item to be deleted. Example:
          'http://www.google.com/base/feeds/items/13185446517496042648'
      url_params: dict (optional) Additional URL parameters to be included
          in the deletion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.
    Returns:
      True if the delete succeeded.
    """
    # item_id is a full URL; strip the scheme+host prefix so Delete()
    # receives a server-relative path.
    return self.Delete('%s' % (item_id[len('http://www.google.com'):],),
                       url_params=url_params, escape_params=escape_params)
  def UpdateItem(self, item_id, updated_item, url_params=None,
                 escape_params=True,
                 converter=gdata.base.GBaseItemFromString):
    """Updates an existing item.
    Args:
      item_id: string The ID of the item to be updated.  Example:
          'http://www.google.com/base/feeds/items/13185446517496042648'
      updated_item: atom.Entry, subclass, or string, containing
          the Atom Entry which will replace the base item which is
          stored at the item_id.
      url_params: dict (optional) Additional URL parameters to be included
          in the update request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.
      converter: func (optional) Function which is executed on the server's
          response before it is returned. Usually this is a function like
          GBaseItemFromString which will parse the response and turn it into
          an object.
    Returns:
      If converter is defined, the results of running converter on the server's
      response. Otherwise, it will be a GBaseItem.
    """
    response = self.Put(updated_item,
        item_id, url_params=url_params, escape_params=escape_params,
        converter=converter)
    if not converter and isinstance(response, atom.Entry):
      return gdata.base.GBaseItemFromString(response.ToString())
    return response
  def ExecuteBatch(self, batch_feed,
                   converter=gdata.base.GBaseItemFeedFromString):
    """Sends a batch request feed to the server.
    Args:
      batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which
          contain the desired CRUD operation and any necessary entry data.
      converter: Function (optional) Function to be executed on the server's
          response. This function should take one string as a parameter. The
          default value is GBaseItemFeedFromString which will turn the result
          into a gdata.base.GBaseItem object.
    Returns:
      A gdata.BatchFeed containing the results.
    """
    return self.Post(batch_feed, BASE_BATCH_URL, converter=converter)
class BaseQuery(gdata.service.Query):
  # Adds Google Base's 'bq' (base query) URL parameter on top of the
  # generic gdata Query, which behaves like a dict of URL parameters.
  def _GetBaseQuery(self):
    return self['bq']
  def _SetBaseQuery(self, base_query):
    self['bq'] = base_query
  bq = property(_GetBaseQuery, _SetBaseQuery,
      doc="""The bq query parameter""")
|
sogelink/ansible | refs/heads/devel | lib/ansible/modules/network/nxos/nxos_snmp_host.py | 10 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_host
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP host configuration.
description:
- Manages SNMP host configuration parameters.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the host configuration if it is configured.
options:
snmp_host:
description:
- IP address of hostname of target host.
required: true
version:
description:
- SNMP version.
required: false
default: v2c
choices: ['v2c', 'v3']
community:
description:
- Community string or v3 username.
required: false
default: null
udp:
description:
- UDP port number (0-65535).
required: false
default: null
type:
description:
- type of message to send to host.
required: false
default: traps
choices: ['trap', 'inform']
vrf:
description:
- VRF to use to source traffic to source.
required: false
default: null
vrf_filter:
description:
- Name of VRF to filter.
required: false
default: null
src_intf:
description:
- Source interface.
required: false
default: null
state:
description:
- Manage the state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp host is configured
- nxos_snmp_host:
snmp_host: 3.3.3.3
community: TESTING
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server host 3.3.3.3 filter-vrf another_test_vrf"]
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
    """Run a single show *command* on the device, requesting JSON output."""
    request = {
        'command': command,
        'output': 'json',
    }
    return run_commands(module, request)
def apply_key_map(key_map, table):
    """Return a copy of *table* with its keys renamed through *key_map*.

    Keys absent from *key_map* (or mapped to a falsy name) are dropped.
    Truthy values are coerced to ``str``; falsy values are kept as-is.
    """
    renamed = {}
    for old_key, value in table.items():
        mapped = key_map.get(old_key)
        if not mapped:
            continue
        renamed[mapped] = str(value) if value else value
    return renamed
def flatten_list(command_lists):
    """Flatten *command_lists* by one level: lists are expanded, scalars kept."""
    flat = []
    for item in command_lists:
        if isinstance(item, list):
            flat += item
        else:
            flat.append(item)
    return flat
def get_snmp_host(host, module):
    """Return the current config dict for *host* parsed from 'show snmp host'.

    Handles two JSON schemas: the standard table layout and the Nexus 5K
    layout (different table/row/key names). Returns {} when the host is
    not configured or the output cannot be parsed.
    """
    body = execute_show_command('show snmp host', module)
    # device JSON key -> module parameter name (standard platforms)
    host_map = {
        'port': 'udp',
        'version': 'version',
        'level': 'v3',
        'type': 'snmp_type',
        'secname': 'community'
    }
    # same mapping for the Nexus 5K JSON schema
    host_map_5k = {
        'port': 'udp',
        'version': 'version',
        'sec_level': 'v3',
        'notif_type': 'snmp_type',
        'commun_or_user': 'community'
    }
    resource = {}
    if body:
        try:
            resource_table = body[0]['TABLE_host']['ROW_host']
            if isinstance(resource_table, dict):
                # a single configured host is returned as a bare dict
                resource_table = [resource_table]
            for each in resource_table:
                key = str(each['host'])
                src = each.get('src_intf')
                host_resource = apply_key_map(host_map, each)
                if src:
                    # value looks like 'src-intf:Ethernet1/1'
                    host_resource['src_intf'] = src.split(':')[1].strip()
                vrf_filt = each.get('TABLE_vrf_filters')
                if vrf_filt:
                    # 'filter-vrf:red,blue' -> ['red', 'blue']
                    vrf_filter = vrf_filt['ROW_vrf_filters']['vrf_filter'].split(':')[1].split(',')
                    filters = [vrf.strip() for vrf in vrf_filter]
                    host_resource['vrf_filter'] = filters
                vrf = each.get('vrf')
                if vrf:
                    host_resource['vrf'] = vrf.split(':')[1].strip()
                resource[key] = host_resource
        except KeyError:
            # Handle the 5K case
            try:
                resource_table = body[0]['TABLE_hosts']['ROW_hosts']
                if isinstance(resource_table, dict):
                    resource_table = [resource_table]
                for each in resource_table:
                    key = str(each['address'])
                    src = each.get('src_intf')
                    host_resource = apply_key_map(host_map_5k, each)
                    if src:
                        host_resource['src_intf'] = src.split(':')[1].strip()
                    vrf_filt = each.get('TABLE_filter_vrf')
                    if vrf_filt:
                        # 5K gives a plain comma-separated vrf list
                        vrf_filter = vrf_filt['ROW_filter_vrf']['filter_vrf_name'].split(',')
                        filters = [vrf.strip() for vrf in vrf_filter]
                        host_resource['vrf_filter'] = filters
                    vrf = each.get('use_vrf_name')
                    if vrf:
                        host_resource['vrf'] = vrf.strip()
                    resource[key] = host_resource
            except (KeyError, AttributeError, TypeError):
                # unparseable 5K output: report whatever was collected
                return resource
        except (AttributeError, TypeError):
            return resource
    find = resource.get(host)
    if find:
        # Normalize: strip stray whitespace from string values.
        fix_find = {}
        for (key, value) in find.items():
            if isinstance(value, str):
                fix_find[key] = value.strip()
            else:
                fix_find[key] = value
        return fix_find
    return {}
def remove_snmp_host(host, existing):
    """Build the CLI command list that removes *host* from the SNMP config.

    Args:
        host: IP address or hostname of the trap/inform receiver to remove.
        existing: dict describing the host's current configuration; must
            contain 'version' ('v2c' or 'v3'), 'snmp_type', 'community',
            and for v3 also 'v3'. The 'version' entry is rewritten in
            place to the CLI form ('2c'/'3'), matching previous behavior.

    Returns:
        A one-element list with the 'no snmp-server host ...' command, or
        an empty list when the existing version is unrecognized.
    """
    commands = []
    # Initialize so an unrecognized version yields [] instead of a
    # NameError (the original referenced 'command' before assignment).
    command = None
    if existing['version'] == 'v3':
        existing['version'] = '3'
        # Single-space join; the old backslash continuation embedded the
        # source indentation inside the command string.
        command = ('no snmp-server host {0} {snmp_type} version '
                   '{version} {v3} {community}'.format(host, **existing))
    elif existing['version'] == 'v2c':
        existing['version'] = '2c'
        command = ('no snmp-server host {0} {snmp_type} version '
                   '{version} {community}'.format(host, **existing))
    if command:
        commands.append(command)
    return commands
def config_snmp_host(delta, proposed, existing, module):
    """Build the CLI commands that apply the *delta* for the proposed host.

    The base 'snmp-server host ...' command is emitted only when one of
    type/version/v3-level/community changed; per-option commands (vrf,
    vrf_filter, udp, src_intf) are appended for each changed key.
    """
    commands = []
    host = proposed['snmp_host']
    parts = ['snmp-server host {0}'.format(host)]
    snmp_type = delta.get('snmp_type')
    version = delta.get('version')
    ver = delta.get('v3')
    community = delta.get('community')
    if snmp_type or version or ver or community:
        # Fall back to the device's current values for pieces not in delta.
        type_string = snmp_type or existing.get('type')
        if type_string:
            parts.append(type_string)
        version = version or existing.get('version')
        if version:
            if version == 'v2c':
                vn = '2c'
            elif version == 'v3':
                vn = '3'
            parts.append('version {0}'.format(vn))
        if ver:
            parts.append(ver or existing.get('v3'))
        if community:
            parts.append(community or existing.get('community'))
        commands.append(' '.join(parts))
    templates = {
        'vrf_filter': 'snmp-server host {0} filter-vrf {vrf_filter}',
        'vrf': 'snmp-server host {0} use-vrf {vrf}',
        'udp': 'snmp-server host {0} udp-port {udp}',
        'src_intf': 'snmp-server host {0} source-interface {src_intf}'
    }
    for key in delta:
        template = templates.get(key)
        if template:
            commands.append(template.format(host, **delta))
    return commands
def main():
    """Module entry point: diff desired vs. device SNMP host config and apply."""
    argument_spec = dict(
        snmp_host=dict(required=True, type='str'),
        community=dict(type='str'),
        udp=dict(type='str'),
        version=dict(choices=['v2c', 'v3'], default='v2c'),
        src_intf=dict(type='str'),
        v3=dict(choices=['noauth', 'auth', 'priv']),
        vrf_filter=dict(type='str'),
        vrf=dict(type='str'),
        snmp_type=dict(choices=['trap', 'inform'], default='trap'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    results = {'changed': False, 'commands': [], 'warnings': warnings}
    snmp_host = module.params['snmp_host']
    community = module.params['community']
    udp = module.params['udp']
    version = module.params['version']
    src_intf = module.params['src_intf']
    v3 = module.params['v3']
    vrf_filter = module.params['vrf_filter']
    vrf = module.params['vrf']
    snmp_type = module.params['snmp_type']
    state = module.params['state']
    # Cross-parameter validation not expressible via argument_spec alone.
    if snmp_type == 'inform' and version != 'v3':
        module.fail_json(msg='inform requires snmp v3')
    if version == 'v2c' and v3:
        module.fail_json(msg='param: "v3" should not be used when '
                             'using version v2c')
    if not any([vrf_filter, vrf, udp, src_intf]):
        if not all([snmp_type, version, community]):
            module.fail_json(msg='when not configuring options like '
                                 'vrf_filter, vrf, udp, and src_intf,'
                                 'the following params are required: '
                                 'type, version, community')
    if version == 'v3' and v3 is None:
        module.fail_json(msg='when using version=v3, the param v3 '
                             '(options: auth, noauth, priv) is also required')
    existing = get_snmp_host(snmp_host, module)
    # existing returns the list of vrfs configured for a given host
    # checking to see if the proposed is in the list
    store = existing.get('vrf_filter')
    if existing and store:
        if vrf_filter not in existing['vrf_filter']:
            existing['vrf_filter'] = None
        else:
            existing['vrf_filter'] = vrf_filter
    commands = []
    if state == 'absent' and existing:
        command = remove_snmp_host(snmp_host, existing)
        commands.append(command)
    elif state == 'present':
        args = dict(
            community=community,
            snmp_host=snmp_host,
            udp=udp,
            version=version,
            src_intf=src_intf,
            vrf_filter=vrf_filter,
            v3=v3,
            vrf=vrf,
            snmp_type=snmp_type
        )
        # Drop unset params, then compute the set-difference against the
        # device state; only changed key/value pairs end up in delta.
        proposed = dict((k, v) for k, v in args.items() if v is not None)
        delta = dict(set(proposed.items()).difference(existing.items()))
        if delta:
            command = config_snmp_host(delta, proposed, existing, module)
            commands.append(command)
    # commands is a list of lists at this point; flatten before sending.
    cmds = flatten_list(commands)
    if cmds:
        results['changed'] = True
        if not module.check_mode:
            load_config(module, cmds)
        # load_config may prepend 'configure'; don't report it to the user.
        if 'configure' in cmds:
            cmds.pop(0)
        results['commands'] = cmds
    module.exit_json(**results)
if __name__ == '__main__':
main()
|
cuckoobox/cuckoo | refs/heads/master | tests/test_resultserver.py | 1 | # Copyright (C) 2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import logging
import mock
import pytest
import tempfile
from cuckoo.common.exceptions import CuckooOperationalError
from cuckoo.core.log import task_log_start, task_log_stop
from cuckoo.core.resultserver import ResultHandler, FileUpload
from cuckoo.core.startup import init_logging
from cuckoo.main import cuckoo_create
from cuckoo.misc import mkdir, set_cwd, cwd
@mock.patch("cuckoo.core.resultserver.select")
def test_open_process_log_unicode(p):
    # Regression test: open_process_log() must not crash when the process
    # name contains non-ASCII characters (here U+202E, RTL override).
    set_cwd(tempfile.mkdtemp())
    cuckoo_create()
    mkdir(cwd(analysis=1))
    mkdir(cwd("logs", analysis=1))
    request = server = mock.MagicMock()
    class Handler(ResultHandler):
        storagepath = cwd(analysis=1)
        def handle(self):
            pass
    init_logging(logging.DEBUG)
    try:
        # Task-scoped logging must be active for open_process_log().
        task_log_start(1)
        Handler(request, (None, None), server).open_process_log({
            "pid": 1, "ppid": 2, "process_name": u"\u202e", "track": True,
        })
    finally:
        task_log_stop(1)
class TestFileUpload(object):
    def fileupload(self, handler):
        """Drive a FileUpload protocol instance to completion using *handler*.

        *handler* must provide read_newline() (returns the destination
        path) and, for successful uploads, read_any() (returns chunks,
        then a falsy value to signal EOF).
        """
        set_cwd(tempfile.mkdtemp())
        cuckoo_create()
        mkdir(cwd(analysis=1))
        mkdir(cwd("logs", analysis=1))
        handler.storagepath = cwd(analysis=1)
        fu = FileUpload(handler, None)
        fu.init()
        # Iterating the protocol object consumes chunks until EOF.
        for x in fu:
            pass
        fu.close()
    def test_success(self):
        class Handler(object):
            reads = [
                "this", "is", "a", "test", None
            ]
            def read_newline(self, strip):
                return "logs/1.log"
            def read_any(self):
                return self.reads.pop(0)
        self.fileupload(Handler())
        # All chunks should have been concatenated into the target file.
        with open(cwd("logs", "1.log", analysis=1), "rb") as f:
            assert f.read() == "thisisatest"
    def invalid_path(self, path):
        # Paths escaping the analysis directory must be rejected.
        class Handler(object):
            def read_newline(self, strip):
                return path
        with pytest.raises(CuckooOperationalError) as e:
            self.fileupload(Handler())
        e.match("banned path")
    def test_invalid_paths(self):
        self.invalid_path("/tmp/foobar")
        self.invalid_path("../hello")
        self.invalid_path("../../foobar")
|
kwailamchan/programming-languages | refs/heads/master | cpp/deeplearning/caffe/scripts/copy_notebook.py | 75 | #!/usr/bin/env python
"""
Takes as arguments:
1. the path to a JSON file (such as an IPython notebook).
2. the path to output file
If 'metadata' dict in the JSON file contains 'include_in_docs': true,
then copies the file to output file, appending the 'metadata' property
as YAML front-matter, adding the field 'category' with value 'notebook'.
"""
import os
import sys
import json
filename = sys.argv[1]
output_filename = sys.argv[2]

# Parse the notebook; any JSON document with a 'metadata' dict works.
with open(filename) as notebook_file:
    content = json.load(notebook_file)

# .get() covers both "key missing" and "value falsy" in one check.
if content['metadata'].get('include_in_docs'):
    yaml_frontmatter = ['---']
    # .items() instead of .iteritems(): works on Python 2 and 3.
    for key, val in content['metadata'].items():
        if key == 'example_name':
            key = 'title'
            if val == '':
                # Untitled notebooks fall back to their file name.
                val = os.path.basename(filename)
        yaml_frontmatter.append('{}: {}'.format(key, val))
    yaml_frontmatter += ['category: notebook']
    yaml_frontmatter += ['original_path: ' + filename]
    with open(output_filename, 'w') as fo:
        fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n')
        # Re-open inside a context manager so the handle is closed
        # (the original leaked a bare open(filename)).
        with open(filename) as notebook_file:
            fo.write(notebook_file.read())
|
TeamLovely/django-migrations-plus | refs/heads/master | migrations_plus/tests.py | 1 | from django.db import connection
from django.db import router, DEFAULT_DB_ALIAS
from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.test import TestCase
from unittest.mock import patch
import migrations_plus
class TestRouter(object):
    """Vaguely primary/replica test router.

    Reads go to 'other' (or the instance's pinned db), writes go to the
    default alias, and databases aren't assumed to propagate changes.
    """
    def __init__(self):
        super().__init__()

    def db_for_read(self, model, instance=None, **hints):
        # An instance pinned to a db keeps it; everything else reads 'other'.
        if not instance:
            return 'other'
        return instance._state.db or 'other'

    def db_for_write(self, model, **hints):
        return DEFAULT_DB_ALIAS

    def allow_relation(self, obj1, obj2, **hints):
        allowed = ('default', 'other')
        return obj1._state.db in allowed and obj2._state.db in allowed

    def allow_migrate(self, db, model):
        return True
class TestRunSQL(TestCase):
    """Verifies migrations_plus.RunSQL routes SQL to the requested database."""
    multi_db = True
    def setUp(self):
        # Swap in the primary/replica-ish TestRouter for the test's duration.
        self.old_routers = router.routers
        router.routers = [TestRouter()]
    def tearDown(self):
        router.routers = self.old_routers
    def apply_operations(self, app_label, project_state, operations):
        """Wrap *operations* in an ad-hoc migration and apply it."""
        migration = Migration('name', app_label)
        migration.operations = operations
        with connection.schema_editor() as editor:
            return migration.apply(project_state, editor)
    def set_up_test_model(self, app_label):
        # Applying zero operations just yields a usable ProjectState.
        return self.apply_operations(app_label, ProjectState(), [])
    def test_default_database(self):
        """
        Tests the RunSQL operation with 'default' database
        """
        statement = 'SELECT 1;'
        project_state = self.set_up_test_model('test_app')
        operation = migrations_plus.RunSQL(statement)
        new_state = project_state.clone()
        operation.state_forwards('test_app', new_state)
        with connection.schema_editor() as editor:
            with patch.object(DatabaseSchemaEditor, 'execute') as patch_schema_editor:
                operation.database_forwards('test_app', editor, project_state, new_state)
                # The editor's alias is 'default', matching RunSQL's default db.
                patch_schema_editor.assert_called_once_with(statement)
    def test_other_database(self):
        """
        Tests the RunSQL operation with 'other' database
        """
        statement = 'SELECT 1;'
        project_state = self.set_up_test_model('test_app')
        operation = migrations_plus.RunSQL(statement, db='other')
        new_state = project_state.clone()
        operation.state_forwards('test_app', new_state)
        with connection.schema_editor() as editor:
            with patch.object(DatabaseSchemaEditor, 'execute') as patch_schema_editor:
                # Pretend the editor is connected to 'other' so aliases match.
                editor.connection.alias = 'other'
                operation.database_forwards('test_app', editor, project_state, new_state)
                patch_schema_editor.assert_called_once_with(statement)
                editor.connection.alias = 'default'
    def test_database_not_called(self):
        """
        Tests the RunSQL operation checking that the wrong DB is not called
        """
        statement = 'SELECT 1;'
        # db='other' but editor on 'default': the SQL must not run.
        project_state = self.set_up_test_model('test_app')
        operation = migrations_plus.RunSQL(statement, db='other')
        new_state = project_state.clone()
        operation.state_forwards('test_app', new_state)
        with connection.schema_editor() as editor:
            with patch.object(DatabaseSchemaEditor, 'execute') as patch_schema_editor:
                operation.database_forwards('test_app', editor, project_state, new_state)
                assert not patch_schema_editor.called
        # default db targeted but editor on 'other': again, must not run.
        project_state = self.set_up_test_model('test_app')
        operation = migrations_plus.RunSQL(statement)
        new_state = project_state.clone()
        operation.state_forwards('test_app', new_state)
        with connection.schema_editor() as editor:
            with patch.object(DatabaseSchemaEditor, 'execute') as patch_schema_editor:
                editor.connection.alias = 'other'
                operation.database_forwards('test_app', editor, project_state, new_state)
                assert not patch_schema_editor.called
                editor.connection.alias = 'default'
|
canvasnetworks/canvas | refs/heads/master | website/canvas/migrations/0147_auto__add_field_commentsticker_epic_message.py | 2 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'CommentSticker.epic_message'
        # Non-null CharField; keep_default=False applies the default ('')
        # once to existing rows without persisting it in the DB schema.
        db.add_column('canvas_commentsticker', 'epic_message', self.gf('django.db.models.fields.CharField')(default='', max_length=140, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'CommentSticker.epic_message'
db.delete_column('canvas_commentsticker', 'epic_message')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.externalcontent': {
'Meta': {'object_name': 'ExternalContent'},
'_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'})
},
'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.remixplugin': {
'Meta': {'object_name': 'RemixPlugin'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.welcomeemailrecipient': {
'Meta': {'object_name': 'WelcomeEmailRecipient'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
|
rosihorrorshow/askbot-devel | refs/heads/master | askbot/migrations/0002_auto__add_field_answer_text__chg_field_answer_html__add_field_question.py | 20 | # encoding: utf-8
import os
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Answer.text'
db.add_column(u'answer', 'text', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)
# Changing field 'Answer.html'
db.alter_column(u'answer', 'html', self.gf('django.db.models.fields.TextField')(null=True))
# Adding field 'Question.text'
db.add_column(u'question', 'text', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)
# Changing field 'Question.html'
db.alter_column(u'question', 'html', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Deleting field 'Answer.text'
db.delete_column(u'answer', 'text')
# Changing field 'Answer.html'
db.alter_column(u'answer', 'html', self.gf('django.db.models.fields.TextField')())
# Deleting field 'Question.text'
db.delete_column(u'question', 'text')
# Changing field 'Question.html'
db.alter_column(u'question', 'html', self.gf('django.db.models.fields.TextField')())
app_dir_name = os.path.basename(os.path.dirname(os.path.dirname(__file__)))
if app_dir_name == 'forum':
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forum.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['forum.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'forum.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'forum.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['forum.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'forum.answerrevision': {
'Meta': {'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['forum.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'forum.authkeyuserassociation': {
'Meta': {'object_name': 'AuthKeyUserAssociation'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_keys'", 'to': "orm['auth.User']"})
},
'forum.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['forum.Badge']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'forum.badge': {
'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {})
},
'forum.book': {
'Meta': {'object_name': 'Book', 'db_table': "u'book'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cover_img': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'pages': ('django.db.models.fields.SmallIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'publication': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'book'", 'db_table': "'book_question'", 'to': "orm['forum.Question']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.bookauthorinfo': {
'Meta': {'object_name': 'BookAuthorInfo', 'db_table': "u'book_author_info'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.bookauthorrss': {
'Meta': {'object_name': 'BookAuthorRss', 'db_table': "u'book_author_rss'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_created_at': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'forum.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'forum.flaggeditem': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['auth.User']"})
},
'forum.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['forum.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'forum.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'to': "orm['forum.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'forum.questionrevision': {
'Meta': {'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['forum.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'forum.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['forum.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'forum.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.tag': {
'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'forum.validationhash': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'ValidationHash'},
'expiration': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 25, 16, 21, 32, 856067)'}),
'hash_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seed': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
}
}
else:
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.answerrevision': {
'Meta': {'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'askbot.authkeyuserassociation': {
'Meta': {'object_name': 'AuthKeyUserAssociation'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_keys'", 'to': "orm['auth.User']"})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.Badge']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badge': {
'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {})
},
'askbot.book': {
'Meta': {'object_name': 'Book', 'db_table': "u'book'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cover_img': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'pages': ('django.db.models.fields.SmallIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'publication': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'book'", 'db_table': "'book_question'", 'to': "orm['askbot.Question']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.bookauthorinfo': {
'Meta': {'object_name': 'BookAuthorInfo', 'db_table': "u'book_author_info'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.bookauthorrss': {
'Meta': {'object_name': 'BookAuthorRss', 'db_table': "u'book_author_rss'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_created_at': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.flaggeditem': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionrevision': {
'Meta': {'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.validationhash': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'ValidationHash'},
'expiration': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 25, 16, 21, 32, 856067)'}),
'hash_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seed': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
}
}
complete_apps = [app_dir_name]
|
hjhhh3000/FirstBlood | refs/heads/master | ChWSeg/PreTreatment/SentenceSeg.py | 1 | '''
File: SentenceSeg.py
'''
'''
Hydrogenium
12/14/2014
'''
'''
Chinese Sentence Segmentation v0.51
Update date: 12/19/2014
'''
#-*- coding:utf-8 -*-
def SentenceSeg(Origin):
    '''
    This function turns the original input into a list of sentences.

    Scans the text one character at a time, splitting on Chinese
    sentence-level punctuation while keeping paired punctuation
    (brackets, quotes, book-title marks) attached to the sentence
    they belong to.  Whitespace and control characters are removed.
    '''
    def IsAWord(Left,Right):
        '''
        Judge whether the contents in paired punctuations are words.

        Returns True when no sentence-level punctuation occurs strictly
        between positions Left and Right of Origin, i.e. the enclosed
        span is a word/phrase rather than one or more sentences.
        '''
        for i in range(Left+1,Right):
            if (Origin[i] in "。,;:!?"):
                return False
        return True
    Sentences = []
    i = 0
    # Word acts as a stack: one boolean per currently open paired
    # punctuation, recording whether its contents form a single word.
    Word = []
    while i < len(Origin):
        if (Origin[i] in "\n\f\000\t\r "): #Delete EOLN & EOF, etc.
            Origin = Origin[:i]+Origin[i+1:]
        elif (Origin[i] in "。,;:!?()“”‘’—《》"):
            if (Origin[i] in "(“‘"): #Figure out the pair of punctuations
                # Find the matching closer and push whether the enclosed
                # contents are a word (no sentence punctuation inside).
                if (Origin[i] == "("):
                    j = i+1
                    while (Origin[j] != ")"):
                        j += 1
                    Word.append(IsAWord(i,j))
                elif (Origin[i] == "“"):
                    j = i+1
                    while (Origin[j] != "”"):
                        j += 1
                    Word.append(IsAWord(i,j))
                else:
                    j = i+1
                    while (Origin[j] != "’"):
                        j += 1
                    Word.append(IsAWord(i,j))
            if (Origin[i] in "(“‘"): #Deal with “”‘’() and their nesting
                # If an opener follows normal text and encloses full
                # sentences, cut the preceding text off as a sentence.
                if ((i > 0)and(not(Origin[i-1] in "(“‘"))and(not(Word[-1]))):
                    Sentences.append(Origin[:i])
                    Origin = Origin[i:]
                    i = 0
                else:
                    i += 1
            elif (Origin[i] in ")”’"):
                if (i == len(Origin)-1):
                    Sentences.append(Origin)
                    Origin = []
                    i = 0
                else:
                    # Keep the closer attached when followed by more
                    # punctuation or when the pair held a single word.
                    if ((Origin[i+1] in "。,;:!?()“”‘’—")or(Word[-1])):
                        i += 1
                    else:
                        Sentences.append(Origin[:i+1])
                        Origin = Origin[i+1:]
                        i = 0
                        Word.pop()
            elif (Origin[i] == "《"): #Deal with 《》. Consider all the contents in 《》 are not sentences.
                while (Origin[i] != "》"):
                    i += 1
                i += 1
            elif (Origin[i] == "—"): #Deal with ——
                Sentences.append(Origin[:i+2])
                Origin = Origin[i+2:]
                i = 0
            elif (Origin[i] in ",:"): #Deal with something like 123,456,789 and 18:00
                if ((Origin[i-1] in "0123456789")and(Origin[i+1] in "0123456789")):
                    i += 1
                else:
                    Sentences.append(Origin[:i+1])
                    Origin = Origin[i+1:]
                    i = 0
            else:
                # Ordinary sentence-ending punctuation: split here unless
                # trailing punctuation should stay with this sentence.
                if ((i == len(Origin)-1)or(not(Origin[i+1] in "。,;:!?)”’—"))):
                    Sentences.append(Origin[:i+1])
                    Origin = Origin[i+1:]
                    i = 0
                else:
                    i += 1
        else:
            i += 1
    return Sentences
'''
Update Log:
v0.1
12/14/2014
Complete the initial version.
v0.15
12/15/2014
1.Fix some errors.
2.Initialize the update log.
v0.16
12/16/2014
Fix some errors.
v0.5
12/16/2014
1.Solve the problem: The contents in paired punctuations are always treated as sentences.
2.Fix some errors.
v0.51
12/19/2014
1.Fix an error: Contents in '《》' are not always treated as words.
2.Fix an error: : and , between digits are separated.
''' |
audoe/ztq | refs/heads/master | ztq_core/ztq_core/async.py | 3 | # -*- encoding:utf-8 -*-
import types
from task import register, push_task, has_task, gen_task, push_buffer_task
import transaction
use_transaction = False
def _setup_callback(kw):
callback = kw.pop('ztq_callback', None)
if callback is not None:
callback_func, callback_args, callback_kw = callback
callback_queue = callback_kw.pop('ztq_queue', callback_func._ztq_queue)
kw.update({'ztq_callback':"%s:%s" % (callback_queue, callback_func.__raw__.__name__),
'ztq_callback_args':callback_args,
'ztq_callback_kw':callback_kw})
fcallback = kw.pop('ztq_fcallback', None)
if fcallback is not None:
callback_func, callback_args, callback_kw = fcallback
callback_queue = callback_kw.pop('ztq_queue', callback_func._ztq_queue)
kw.update({'ztq_fcallback':"%s:%s" % (callback_queue, callback_func.__raw__.__name__),
'ztq_fcallback_args':callback_args,
'ztq_fcallback_kw':callback_kw})
pcallback = kw.pop('ztq_pcallback', None)
if pcallback is not None:
callback_func, callback_args, callback_kw = pcallback
callback_queue = callback_kw.pop('ztq_queue', callback_func._ztq_queue)
kw.update({'ztq_pcallback':"%s:%s" % (callback_queue, callback_func.__raw__.__name__),
'ztq_pcallback_args':callback_args,
'ztq_pcallback_kw':callback_kw})
def push_task_to_queue(task_name, args, kw, on_commit=False, buffer=False):
    """Dispatch a task, either immediately or after the transaction commits.

    :param task_name: ``"<queue>:<function name>"`` task identifier
    :param args: positional arguments for the task
    :param kw: keyword arguments for the task
    :param on_commit: defer the push until the current transaction commits
    :param buffer: push via the buffered task channel instead of directly
    """
    pusher = push_buffer_task if buffer else push_task
    if on_commit:
        # Defer: the hook runs the pusher only after a successful commit.
        add_after_commit_hook(pusher, (task_name,) + args, kw)
    else:
        pusher(task_name, *args, **kw)
def async(*_args, **_kw):
    """ Decorator: push the wrapped call onto a job queue so it is executed
    asynchronously (optionally only once the current transaction commits).

    Defining a job
    =============
    First form::

        @async
        def say_hello(name):
            print 'hello, ', name

    Second form, pre-selecting queue / transaction behaviour::

        @async(queue='hello_queue', transaction=True)
        def say_hello(name):
            print 'hello, ', name

    Usage
    ================
    The following call styles are supported::

        say_hello('asdfa')
        say_hello('asdfa', ztq_queue="asdfa", ztq_transaction=False)
    """
    if len(_args) == 1 and not _kw and isinstance(_args[0], types.FunctionType): # bare decorator form (no arguments)
        func = _args[0]
        def new_func1(*args, **kw):
            # Per-call control keywords, removed before serialization.
            queue_name = kw.pop('ztq_queue', 'default')
            buffer = kw.pop('ztq_buffer', False)
            on_commit= kw.pop('ztq_transaction', use_transaction)
            task_name = "%s:%s" % (queue_name, func.__name__)
            _setup_callback(kw)
            push_task_to_queue(task_name, args, kw, on_commit=on_commit, buffer=buffer)
        # Keep a handle on the raw function and its default queue so
        # callbacks (_setup_callback) can serialize references to it.
        new_func1.__raw__ = func
        new_func1._ztq_queue = 'default'
        register(func)
        return new_func1
    else:
        # Parametrized form: @async(queue=...) -- remember the chosen queue.
        _queue_name = _kw.get('queue', 'default')
        def _async(func):
            def new_func(*args, **kw):
                #on_commit= kw.pop('ztq_transaction', _on_commit)
                on_commit= kw.pop('ztq_transaction', use_transaction)
                queue_name = kw.pop('ztq_queue', _queue_name)
                buffer = kw.pop('ztq_buffer', False)
                task_name = "%s:%s" % (queue_name, func.__name__)
                _setup_callback(kw)
                push_task_to_queue(task_name, args, kw, on_commit=on_commit, buffer=buffer)
            new_func.__raw__ = func
            new_func._ztq_queue = _queue_name
            register(func)
            return new_func
        return _async
def prepare_task(func, *args, **kw):
    """Normalize the callback keywords in *kw* and bundle the call
    as a ``(func, args, kw)`` tuple for later dispatch."""
    _setup_callback(kw)
    return (func, args, kw)
def ping_task(func, *args, **kw):
    """Check whether an identical task is already queued.

    Control keywords (all popped from *kw* before the task is generated):

    - ``ztq_queue``: queue to look in (default: the function's queue)
    - ``ztq_first``: move the task to the front of the queue if found
    - ``ztq_run``: if no matching task is queued (``'none'``), push it now
    - ``ztq_transaction``: transaction flag forwarded when re-pushing

    :return: the value reported by ``has_task`` (``'none'`` when no
             matching task is queued)
    """
    queue_name = kw.pop('ztq_queue', func._ztq_queue)
    to_front = kw.pop('ztq_first', False)
    on_commit = kw.pop('ztq_transaction', None)
    run = kw.pop('ztq_run', False)
    task = gen_task(func.__raw__.__name__, *args, **kw)
    result = has_task(queue_name, task, to_front=to_front)
    if result == 'none' and run:
        # Task absent: restore the control keywords and dispatch it.
        kw['ztq_queue'] = queue_name
        kw['ztq_first'] = to_front
        if on_commit is not None:
            kw['ztq_transaction'] = on_commit
        func(*args, **kw)
    return result
#### The code below adds transaction support for queued tasks
def enable_transaction(enable):
    """Globally toggle transaction support (disabled by default)."""
    global use_transaction
    use_transaction = True if enable else False
def _run_after_commit(success_commit, func, args, kw):
if success_commit:
func(*args, **kw)
def add_after_commit_hook(func, args, kw):
    """Register *func* to run only after the current transaction commits.

    No-op when transaction support is disabled globally.
    """
    if use_transaction:
        transaction.get().addAfterCommitHook(
            _run_after_commit,
            (func, args, kw),
        )
|
icloudrnd/automation_tools | refs/heads/master | openstack_dashboard/dashboards/router/nexus1000v/forms.py | 35 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
LOG = logging.getLogger(__name__)
def get_tenant_choices(request):
    """Build a form-choices list of enabled projects, headed by a blank
    "Select a project" placeholder.  Failures to reach Keystone are
    reported through horizon's exception handler and yield only the
    placeholder entry."""
    projects = []
    try:
        projects, has_more = api.keystone.tenant_list(request)
    except Exception:
        exceptions.handle(request, _('Projects could not be retrieved.'))
    choices = [('', _("Select a project"))]
    choices.extend((project.id, project.name)
                   for project in projects if project.enabled)
    return choices
class CreateNetworkProfile(forms.SelfHandlingForm):
    """Create Network Profile form.

    Offers a segment type (VLAN / Overlay / Trunk) whose choice drives,
    via horizon's 'switchable'/'switched' widget attributes, which of the
    sub-type and range fields are shown.  On submit the profile is created
    through the Neutron API.
    """
    name = forms.CharField(max_length=255,
                           label=_("Name"))
    segment_type = forms.ChoiceField(label=_('Segment Type'),
                                     choices=[('vlan', _('VLAN')),
                                              ('overlay', _('Overlay')),
                                              ('trunk', _('Trunk'))],
                                     widget=forms.Select
                                     (attrs={'class': 'switchable',
                                             'data-slug': 'segtype'}))
    # Sub type options available for Overlay segment type
    sub_type = forms.ChoiceField(label=_('Sub Type'),
                                 choices=[('native_vxlan', _('Native VXLAN')),
                                          ('enhanced', _('Enhanced VXLAN')),
                                          ('other', _('Other'))],
                                 required=False,
                                 widget=forms.Select
                                 (attrs={'class': 'switchable switched',
                                         'data-slug': 'subtype',
                                         'data-switch-on': 'segtype',
                                         'data-segtype-overlay':
                                             _("Sub Type")}))
    # Sub type options available for Trunk segment type
    sub_type_trunk = forms.ChoiceField(label=_('Sub Type'),
                                       choices=[('vlan', _('VLAN'))],
                                       required=False,
                                       widget=forms.Select
                                       (attrs={'class': 'switched',
                                               'data-switch-on': 'segtype',
                                               'data-segtype-trunk':
                                                   _("Sub Type")}))
    segment_range = forms.CharField(max_length=255,
                                    label=_("Segment Range"),
                                    required=False,
                                    widget=forms.TextInput
                                    (attrs={'class': 'switched',
                                            'data-switch-on': 'segtype',
                                            'data-segtype-vlan':
                                                _("Segment Range"),
                                            'data-segtype-overlay':
                                                _("Segment Range")}),
                                    help_text=_("1-4093 for VLAN; "
                                                "5000 and above for Overlay"))
    multicast_ip_range = forms.CharField(max_length=30,
                                         label=_("Multicast IP Range"),
                                         required=False,
                                         widget=forms.TextInput
                                         (attrs={'class': 'switched',
                                                 'data-switch-on':
                                                     'subtype',
                                                 'data-subtype-native_vxlan':
                                                     _("Multicast IP Range")}),
                                         help_text=_("Multicast IPv4 range"
                                                     "(e.g. 224.0.1.0-"
                                                     "224.0.1.100)"))
    other_subtype = forms.CharField(max_length=255,
                                    label=_("Sub Type Value (Manual Input)"),
                                    required=False,
                                    widget=forms.TextInput
                                    (attrs={'class': 'switched',
                                            'data-switch-on':
                                                'subtype',
                                            'data-subtype-other':
                                                _("Sub Type Value "
                                                  "(Manual Input)")}),
                                    help_text=_("Enter parameter (e.g. GRE)"))
    physical_network = forms.CharField(max_length=255,
                                       label=_("Physical Network"),
                                       required=False,
                                       widget=forms.TextInput
                                       (attrs={'class': 'switched',
                                               'data-switch-on': 'segtype',
                                               'data-segtype-vlan':
                                                   _("Physical Network")}))
    project = forms.ChoiceField(label=_("Project"),
                                required=False)
    def __init__(self, request, *args, **kwargs):
        # Populate the project dropdown with the enabled Keystone projects.
        super(CreateNetworkProfile, self).__init__(request, *args, **kwargs)
        self.fields['project'].choices = get_tenant_choices(request)
    def clean(self):
        """Fold the type-specific sub-type fields into 'sub_type'."""
        # If sub_type is 'other' or 'trunk' then
        # assign this new value for sub_type
        cleaned_data = super(CreateNetworkProfile, self).clean()
        segment_type = cleaned_data.get('segment_type')
        if segment_type == 'overlay':
            sub_type = cleaned_data.get('sub_type')
            if sub_type == 'other':
                # Manual sub-type entry overrides the dropdown choice.
                other_subtype = cleaned_data.get('other_subtype')
                cleaned_data['sub_type'] = other_subtype
                LOG.debug('subtype is now %(params)s',
                          {'params': other_subtype})
        elif segment_type == 'trunk':
            sub_type_trunk = cleaned_data.get('sub_type_trunk')
            cleaned_data['sub_type'] = sub_type_trunk
            LOG.debug('subtype is now %(params)s',
                      {'params': sub_type_trunk})
        return cleaned_data
    def handle(self, request, data):
        """Create the network profile via Neutron.

        :return: the created profile on success; on failure horizon's
                 exception handler redirects back to the index view.
        """
        try:
            LOG.debug('request = %(req)s, params = %(params)s',
                      {'req': request, 'params': data})
            params = {'name': data['name'],
                      'segment_type': data['segment_type'],
                      'sub_type': data['sub_type'],
                      'segment_range': data['segment_range'],
                      'physical_network': data['physical_network'],
                      'multicast_ip_range': data['multicast_ip_range'],
                      'tenant_id': data['project']}
            profile = api.neutron.profile_create(request,
                                                 **params)
            msg = _('Network Profile %s '
                    'was successfully created.') % data['name']
            LOG.debug(msg)
            messages.success(request, msg)
            return profile
        except Exception:
            redirect = reverse('horizon:router:nexus1000v:index')
            msg = _('Failed to create network profile %s') % data['name']
            exceptions.handle(request, msg, redirect=redirect)
class UpdateNetworkProfile(CreateNetworkProfile):
    """Update Network Profile form.

    Reuses the creation form's fields but marks the immutable ones
    read-only; only name, segment range and multicast IP range are
    sent to Neutron on update.
    """
    profile_id = forms.CharField(label=_("ID"),
                                 widget=forms.HiddenInput())
    project = forms.CharField(label=_("Project"), required=False)
    def __init__(self, request, *args, **kwargs):
        super(UpdateNetworkProfile, self).__init__(request, *args, **kwargs)
        # These properties cannot be changed after creation.
        self.fields['segment_type'].widget.attrs['readonly'] = 'readonly'
        self.fields['sub_type'].widget.attrs['readonly'] = 'readonly'
        self.fields['sub_type_trunk'].widget.attrs['readonly'] = 'readonly'
        self.fields['other_subtype'].widget.attrs['readonly'] = 'readonly'
        self.fields['physical_network'].widget.attrs['readonly'] = 'readonly'
        self.fields['project'].widget.attrs['readonly'] = 'readonly'
    def handle(self, request, data):
        """Update the network profile via Neutron.

        :return: the updated profile on success, False on failure.
        """
        try:
            LOG.debug('request = %(req)s, params = %(params)s',
                      {'req': request, 'params': data})
            params = {'name': data['name'],
                      'segment_range': data['segment_range'],
                      'multicast_ip_range': data['multicast_ip_range']}
            profile = api.neutron.profile_update(
                request,
                data['profile_id'],
                **params
            )
            msg = _('Network Profile %s '
                    'was successfully updated.') % data['name']
            LOG.debug(msg)
            messages.success(request, msg)
            return profile
        except Exception:
            msg = _('Failed to update '
                    'network profile (%s).') % data['name']
            redirect = reverse('horizon:router:nexus1000v:index')
            exceptions.handle(request, msg, redirect=redirect)
            return False
|
rlutz/xorn | refs/heads/master | tests/python/snippets/xml_writer.py | 1 | # Copyright (C) 2013-2019 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import xorn.xml_writer
# Minimal usage example: write a one-element XML document to stdout.
# The '## [use XMLWriter]' markers delimit a documentation snippet;
# keep added commentary outside of them.
f = sys.stdout
## [use XMLWriter]
w = xorn.xml_writer.XMLWriter(f.write)
w.start_element('document')
w.write_attribute('attribute', 'value')
w.write_character_data('some text')
w.end_element()
assert w.is_done()
## [use XMLWriter]
|
rpavlik/chromium | refs/heads/cmake-fixing | crserverlib/server_simpleget.py | 4 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
sys.path.append( "../glapi_parser" )
import apiutil
apiutil.CopyrightC()
# Python 2 code generator: emits the C source of Chromium server
# dispatchers for the glGet* query family.  First the fixed preamble:
print """#include "cr_spu.h"
#include "chromium.h"
#include "cr_error.h"
#include "cr_mem.h"
#include "cr_net.h"
#include "server_dispatch.h"
#include "server.h"
"""
from get_sizes import *;
# One crServerDispatchGet* wrapper is generated per query flavour;
# funcs[i] returns values of C type types[i].
funcs = [ 'GetIntegerv', 'GetFloatv', 'GetDoublev', 'GetBooleanv' ]
types = [ 'GLint', 'GLfloat', 'GLdouble', 'GLboolean' ]
for index in range(len(funcs)):
    func_name = funcs[index]
    params = apiutil.Parameters(func_name)
    # Each generated dispatcher queries the head SPU, sends the values
    # back to the client via crServerReturnValue(), then frees the buffer.
    print 'void SERVER_DISPATCH_APIENTRY crServerDispatch%s( %s )' % ( func_name, apiutil.MakeDeclarationString(params))
    print '{'
    print '\t%s *get_values;' % types[index]
    print '\tint tablesize = __numValues( pname ) * sizeof(%s);' % types[index]
    print '\t(void) params;'
    print '\tget_values = (%s *) crAlloc( tablesize );' % types[index]
    print '\tcr_server.head_spu->dispatch_table.%s( pname, get_values );' % func_name
    print '\tcrServerReturnValue( get_values, tablesize );'
    print '\tcrFree(get_values);'
    print '}\n'
|
zadgroup/edx-platform | refs/heads/master | common/lib/capa/capa/safe_exec/lazymod.py | 193 | """A module proxy for delayed importing of modules.
From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html,
in the public domain.
"""
import sys
class LazyModule(object):
    """A lazy module proxy.

    The real module is imported on first attribute access; after that the
    proxy adopts the module's ``__dict__`` so later lookups hit the real
    namespace directly.  Unknown attributes are tried as submodules and,
    when importable, wrapped in further LazyModule proxies.
    """
    def __init__(self, modname):
        # Write through __dict__ to avoid triggering attribute machinery.
        self.__dict__['__name__'] = modname
        self._set_mod(None)
    def _set_mod(self, mod):
        # Once the real module is known, share its namespace wholesale and
        # remember the module object under a reserved key.
        if mod is not None:
            self.__dict__ = mod.__dict__
        self.__dict__['_lazymod_mod'] = mod
    def _load_mod(self):
        # Import the real module and adopt its namespace.
        __import__(self.__name__)
        self._set_mod(sys.modules[self.__name__])
    def __getattr__(self, name):
        # Only reached for attributes missing from __dict__: import the
        # module on first use, then fall back to submodule resolution.
        if self.__dict__['_lazymod_mod'] is None:
            self._load_mod()
        mod = self.__dict__['_lazymod_mod']
        if hasattr(mod, name):
            return getattr(mod, name)
        else:
            try:
                subname = '%s.%s' % (self.__name__, name)
                __import__(subname)
                submod = getattr(mod, name)
            except ImportError:
                raise AttributeError("'module' object has no attribute %r" % name)
            # Cache the submodule as another lazy proxy.
            self.__dict__[name] = LazyModule(subname)
            return self.__dict__[name]
|
popeye123/ytcc | refs/heads/master | ytcc/exceptions.py | 1 | # ytcc - The YouTube channel checker
# Copyright (C) 2019 Wolfgang Popp
#
# This file is part of ytcc.
#
# ytcc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ytcc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ytcc. If not, see <http://www.gnu.org/licenses/>.
"""Exceptions in their own module to avoid circular imports."""
class YtccException(Exception):
    """Base class of all exceptions raised by ytcc."""
class BadURLException(YtccException):
    """Raised when a given URL does not refer to a YouTube channel."""
class DuplicateChannelException(YtccException):
    """Raised when subscribing to a channel that is already subscribed."""
class ChannelDoesNotExistException(YtccException):
    """Raised when the URL of a given channel does not exist."""
class InvalidSubscriptionFileError(YtccException):
    """Raised when the given subscription file is not a valid XML file."""
class BadConfigException(YtccException):
    """Raised when an error in the configuration file is encountered."""
class DatabaseOperationalError(YtccException):
    """Raised when the database is locked and no operations can be performed.

    Can happen when two instances of ytcc are running at the same time.
    """
|
Hubert51/AutoGrading | refs/heads/master | learning/web_Haotian/venv/Lib/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py | 356 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
    """Convert an html5lib tree-walker token stream into Genshi events.

    Consecutive character tokens are coalesced into a single TEXT event.
    Source positions are unknown, so every event carries (None, -1, -1).
    """
    text = []
    for token in walker:
        type = token["type"]
        if type in ("Characters", "SpaceCharacters"):
            # Buffer character data until a non-text token arrives.
            text.append(token["data"])
        elif text:
            # Flush the buffered text before handling any other token.
            yield TEXT, "".join(text), (None, -1, -1)
            text = []
        if type in ("StartTag", "EmptyTag"):
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]
            # Qualify attribute names with their namespace when present.
            attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
                           for attr, value in token["data"].items()])
            yield (START, (QName(name), attrs), (None, -1, -1))
            if type == "EmptyTag":
                # An empty tag yields both a START and an END event.
                type = "EndTag"
        if type == "EndTag":
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]
            yield END, QName(name), (None, -1, -1)
        elif type == "Comment":
            yield COMMENT, token["data"], (None, -1, -1)
        elif type == "Doctype":
            yield DOCTYPE, (token["name"], token["publicId"],
                            token["systemId"]), (None, -1, -1)
        else:
            pass  # FIXME: What to do?
    if text:
        # Flush any trailing character data.
        yield TEXT, "".join(text), (None, -1, -1)
|
neuropoly/spinalcordtoolbox | refs/heads/master | spinalcordtoolbox/image.py | 1 | #!/usr/bin/env python
#########################################################################################
#
# SCT Image API
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2018 Polytechnique Montreal <www.neuro.polymtl.ca>
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: Sort out the use of Image.hdr and Image.header --> they seem to carry duplicated information.
import sys
import os
import itertools
import warnings
import logging
import shutil
import math
from typing import Sequence
import nibabel as nib
import numpy as np
import pathlib
from contrib import fslhd
import transforms3d.affines as affines
import re
from scipy.ndimage import map_coordinates
from spinalcordtoolbox.types import Coordinate
from spinalcordtoolbox.utils import sct_dir_local_path, extract_fname
logger = logging.getLogger(__name__)
def _get_permutations(im_src_orientation, im_dst_orientation):
"""
:param im_src_orientation str: Orientation of source image. Example: 'RPI'
:param im_dest_orientation str: Orientation of destination image. Example: 'SAL'
:return: list of axes permutations and list of inversions to achieve an orientation change
"""
opposite_character = {'L': 'R', 'R': 'L', 'A': 'P', 'P': 'A', 'I': 'S', 'S': 'I'}
perm = [0, 1, 2]
inversion = [1, 1, 1]
for i, character in enumerate(im_src_orientation):
try:
perm[i] = im_dst_orientation.index(character)
except ValueError:
perm[i] = im_dst_orientation.index(opposite_character[character])
inversion[i] = -1
return perm, inversion
class Slicer(object):
    """
    Provides a sliced view onto original image data.
    Can be used as a sequence.

    Notes:

    - The original image data is directly available without copy,
      which is a nice feature, not a bug! Use .copy() if you need copies...

    Example:

    .. code:: python

       for slice2d in msct_image.SlicerFancy(im3d, "RPI"):
           print(slice)
    """
    def __init__(self, im, orientation="LPI"):
        """
        :param im: image to iterate through
        :param orientation: "from" letters to indicate how to slice the image.
                            The slices are done on the last letter axis,
                            and they are defined as the first/second letter.
        """
        if not isinstance(im, Image):
            raise ValueError("Expecting an image")
        if not orientation in all_refspace_strings():
            raise ValueError("Invalid orientation spec")
        # Get a different view on data, as if we were doing a reorientation
        perm, inversion = _get_permutations(im.orientation, orientation)
        # axes inversion (flip)
        data = im.data[::inversion[0], ::inversion[1], ::inversion[2]]
        # axes manipulations (transpose); each permutation is decomposed
        # into at most two pairwise axis swaps.
        if perm == [1, 0, 2]:
            data = np.swapaxes(data, 0, 1)
        elif perm == [2, 1, 0]:
            data = np.swapaxes(data, 0, 2)
        elif perm == [0, 2, 1]:
            data = np.swapaxes(data, 1, 2)
        elif perm == [2, 0, 1]:
            data = np.swapaxes(data, 0, 2)  # transform [2, 0, 1] to [1, 0, 2]
            data = np.swapaxes(data, 0, 1)  # transform [1, 0, 2] to [0, 1, 2]
        elif perm == [1, 2, 0]:
            data = np.swapaxes(data, 0, 2)  # transform [1, 2, 0] to [0, 2, 1]
            data = np.swapaxes(data, 1, 2)  # transform [0, 2, 1] to [0, 1, 2]
        elif perm == [0, 1, 2]:
            # do nothing
            pass
        else:
            raise NotImplementedError()
        self._data = data
        self._orientation = orientation
        self._nb_slices = data.shape[2]
    def __len__(self):
        # Number of 2D slices along the slicing (last-letter) axis.
        return self._nb_slices
    def __getitem__(self, idx):
        """
        :return: an image slice, at slicing index idx
        :param idx: slicing index (according to the slicing direction)
        """
        if not isinstance(idx, int):
            raise NotImplementedError()
        if idx >= self._nb_slices:
            raise IndexError("I just have {} slices!".format(self._nb_slices))
        return self._data[:, :, idx]
class SlicerOneAxis(object):
    """
    Image slicer to use when you don't care about the 2D slice orientation,
    and don't want to specify them.
    The slicer will just iterate through the right axis that corresponds to
    its specification.
    Can help getting ranges and slice indices.
    """
    def __init__(self, im, axis="IS"):
        # Validate the two-letter axis spec: both letters must be valid
        # anatomical labels and must be opposites (e.g. "IS", "RL").
        opposite_character = {'L': 'R', 'R': 'L', 'A': 'P', 'P': 'A', 'I': 'S', 'S': 'I'}
        axis_labels = "LRPAIS"
        if len(axis) != 2:
            raise ValueError()
        if axis[0] not in axis_labels:
            raise ValueError()
        if axis[1] not in axis_labels:
            raise ValueError()
        if axis[0] != opposite_character[axis[1]]:
            raise ValueError()
        # Locate which image dimension carries the requested axis.
        for idx_axis in range(2):
            dim_nr = im.orientation.find(axis[idx_axis])
            if dim_nr != -1:
                break
        if dim_nr == -1:
            raise ValueError()
        # SCT convention
        from_dir = im.orientation[dim_nr]
        self.direction = +1 if axis[0] == from_dir else -1
        self.nb_slices = im.dim[dim_nr]
        self.im = im
        self.axis = axis
        # Build an index tuple that fixes the slicing axis and keeps the
        # other two dimensions whole.
        self._slice = lambda idx: tuple([(idx if x in axis else slice(None)) for x in im.orientation])
    def __len__(self):
        # Number of slices along the requested axis.
        return self.nb_slices
    def __getitem__(self, idx):
        """
        :return: an image slice, at slicing index idx
        :param idx: slicing index (according to the slicing direction)
        """
        if isinstance(idx, slice):
            raise NotImplementedError()
        if idx >= self.nb_slices:
            raise IndexError("I just have {} slices!".format(self.nb_slices))
        # Mirror the index when iterating against the stored direction.
        if self.direction == -1:
            idx = self.nb_slices - 1 - idx
        return self.im.data[self._slice(idx)]
class SlicerMany(object):
    """
    Image*s* slicer utility class: slice several images in lockstep.

    Wraps one slicer per image (all built with the same slicer class and
    arguments) and, for each index, returns the list of corresponding
    slices.  All images must expose the same number of slices along the
    slicing axis.

    Use with great care for now, that it's not very documented.
    """
    def __init__(self, images, slicerclass, *args, **kw):
        """
        :param images: non-empty sequence of images to slice in parallel
        :param slicerclass: slicer type (e.g. Slicer, SlicerOneAxis) applied
                            to each image with ``*args`` / ``**kw``
        :raises ValueError: on an empty image list or mismatched slice counts
        """
        if len(images) == 0:
            raise ValueError("Don't expect me to work on 0 images!")
        self.slicers = [slicerclass(im, *args, **kw) for im in images]
        # Use len() instead of the private _nb_slices attribute so that any
        # slicer class implementing __len__ (e.g. SlicerOneAxis, which only
        # exposes nb_slices) works here too.
        nb_slices = [len(x) for x in self.slicers]
        if len(set(nb_slices)) != 1:
            raise ValueError("All images must have the same number of slices along the slicing axis!")
        self._nb_slices = nb_slices[0]
    def __len__(self):
        # Common number of slices across all wrapped slicers.
        return self._nb_slices
    def __getitem__(self, idx):
        """:return: list with the idx-th slice of every wrapped image."""
        return [x[idx] for x in self.slicers]
def check_affines_match(im):
    """Return True when the image header's qform and sform encode the same
    affine (absolute tolerance 1e-3).

    A singular sform (uninitialized) cannot be converted to a qform; in
    that case a warning is logged and the check is treated as a match.
    """
    header = im.hdr
    probe = header.copy()
    try:
        probe.set_qform(header.get_sform())
    except np.linalg.LinAlgError:
        # See https://github.com/neuropoly/spinalcordtoolbox/issues/3097
        logger.warning("The sform for {} is uninitialized and may cause unexpected behaviour."
                       ''.format(im.absolutepath))
        if im.absolutepath is None:
            logger.error("Internal code has produced an image with an uninitialized sform. "
                         "please report this on github at https://github.com/neuropoly/spinalcordtoolbox/issues "
                         "or on the SCT forums https://forum.spinalcordmri.org/.")
        return True
    return np.allclose(header.get_qform(), probe.get_qform(), atol=1e-3)
class Image(object):
    """
    Create an object that behaves similarly to nibabel's image object. Useful additions include: dim, check_sform and
    a few methods (load, save) that deal with image dtype.
    """

    def __init__(self, param=None, hdr=None, orientation=None, absolutepath=None, dim=None, verbose=1,
                 check_sform=False):
        """
        :param param: string indicating a path to a image file or an `Image` object.
        :param hdr: a nibabel header object to use as the header for the image (overwritten if `param` is provided)
        :param orientation: a three character orientation code (e.g. RPI).
        :param absolutepath: a relative path to associate with the image.
        :param dim: The dimensions of the image, defaults to automatically determined.
        :param verbose: integer how verbose to be 0 is silent 1 is chatty.
        :param check_sform: whether or not to check whether the sform matches the qform. If this is set to `True`,
                            `Image` will raise an error if they don't match.
        """
        # initialization of all parameters
        self.im_file = None     # underlying nibabel image, set by loadFromPath()
        self.data = None        # numpy array holding the voxel data
        self._path = None       # absolute storage path (see absolutepath property)
        self.ext = ""

        if hdr is None:
            hdr = self.hdr = nib.Nifti1Header()  # an empty header
        else:
            self.hdr = hdr

        if absolutepath is not None:
            self._path = os.path.abspath(absolutepath)

        self.verbose = verbose

        # load an image from file
        # NOTE(review): the `sys.hexversion < 0x03000000` branch is Python 2
        # compatibility (`unicode` does not exist on Python 3).
        if isinstance(param, str) or (sys.hexversion < 0x03000000 and isinstance(param, unicode)):
            self.loadFromPath(param, verbose)
        # copy constructor
        elif isinstance(param, type(self)):
            self.copy(param)
        # create an empty image (full of zero) of dimension [dim]. dim must be [x,y,z] or (x,y,z). No header.
        elif isinstance(param, list):
            self.data = np.zeros(param)
            self.hdr = hdr
        # create a copy of im_ref
        elif isinstance(param, (np.ndarray, np.generic)):
            self.data = param
            self.hdr = hdr
        else:
            raise TypeError('Image constructor takes at least one argument.')

        # Make sure sform and qform are the same.
        # Context: https://github.com/neuropoly/spinalcordtoolbox/issues/2429
        if check_sform and not check_affines_match(self):
            if self.absolutepath is None:
                logger.error("Internal code has produced an image with inconsistent qform and sform "
                             "please report this on github at https://github.com/neuropoly/spinalcordtoolbox/issues "
                             " or on the SCT forum https://forum.spinalcordmri.org/.")
            else:
                logger.error(f"Image {self._path} has different qform and sform matrices. This can produce incorrect "
                             f"results. Please use 'sct_image -i {self._path} -header' to check that both affine "
                             f"matrices are valid. Then, consider running either 'sct_image -set-sform-to-qform' or "
                             f"'sct_image -set-qform-to-sform' to fix any discrepancies you may find.")
            raise ValueError("Image sform does not match qform")

    @property
    def dim(self):
        """Image dimensions, computed from the header: (nx, ny, nz, nt, px, py, pz, pt)."""
        return get_dimension(self)

    @property
    def orientation(self):
        """Three-letter orientation code (SCT convention), derived from the header affine."""
        return get_orientation(self)

    @property
    def absolutepath(self):
        """
        Storage path (either actual or potential)

        Notes:

        - As several tools perform chdir() it's very important to have absolute paths
        - When set, if relative:

          - If it already existed, it becomes a new basename in the old dirname
          - Else, it becomes absolute (shortcut)

        Usually not directly touched (use `Image.save`), but in some cases it's
        the best way to set it.
        """
        return self._path

    @absolutepath.setter
    def absolutepath(self, value):
        if value is None:
            self._path = None
            return
        # Relative path + existing path: reuse the old directory with the new basename.
        elif not os.path.isabs(value) and self._path is not None:
            value = os.path.join(os.path.dirname(self._path), value)
        # Relative path, no prior path: resolve against the current working directory.
        elif not os.path.isabs(value):
            value = os.path.abspath(value)
        self._path = value

    @property
    def header(self):
        """Alias for `hdr` (nibabel-style attribute name)."""
        return self.hdr

    @header.setter
    def header(self, value):
        self.hdr = value

    def __deepcopy__(self, memo):
        """Deep-copy the image (data, header, orientation, path and dim are all copied)."""
        from copy import deepcopy
        return type(self)(deepcopy(self.data, memo), deepcopy(self.hdr, memo), deepcopy(self.orientation, memo), deepcopy(self.absolutepath, memo), deepcopy(self.dim, memo))

    def copy(self, image=None):
        """
        Copy `image` into self when given, otherwise return a deep copy of self.
        """
        from copy import deepcopy
        if image is not None:
            self.im_file = deepcopy(image.im_file)
            self.data = deepcopy(image.data)
            self.hdr = deepcopy(image.hdr)
            self._path = deepcopy(image._path)
        else:
            return deepcopy(self)

    def copy_qform_from_ref(self, im_ref):
        """
        Copy qform and sform and associated codes from a reference Image object

        :param im_ref:
        :return:
        """
        # Copy q/sform and code
        self.hdr.set_qform(im_ref.hdr.get_qform())
        self.hdr._structarr['qform_code'] = im_ref.hdr._structarr['qform_code']
        self.hdr.set_sform(im_ref.hdr.get_sform())
        self.hdr._structarr['sform_code'] = im_ref.hdr._structarr['sform_code']

    def set_sform_to_qform(self):
        """Use this (or set_qform_to_sform) when matching matrices are required."""
        self.hdr.set_sform(self.hdr.get_qform())
        self.hdr._structarr['sform_code'] = self.hdr._structarr['qform_code']

    def set_qform_to_sform(self):
        """Use this or (set_sform_to_qform) when matching matrices are required."""
        self.hdr.set_qform(self.hdr.get_sform())
        self.hdr._structarr['qform_code'] = self.hdr._structarr['sform_code']

    def loadFromPath(self, path, verbose):
        """
        This function load an image from an absolute path using nibabel library

        :param path: path of the file from which the image will be loaded
        :return:
        """
        self.im_file = nib.load(path)
        self.data = self.im_file.get_data()
        self.hdr = self.im_file.header
        self.absolutepath = path
        # The setter may have absolutized a relative path; log both forms when they differ.
        if path != self.absolutepath:
            logger.debug("Loaded %s (%s) orientation %s shape %s", path, self.absolutepath, self.orientation, self.data.shape)
        else:
            logger.debug("Loaded %s orientation %s shape %s", path, self.orientation, self.data.shape)

    def change_shape(self, shape):
        """
        Change data shape (in-place)

        This is mostly useful for adding/removing a fourth dimension,
        you probably don't want to use this function.
        """
        change_shape(self, shape, self)
        return self

    def change_orientation(self, orientation, inverse=False):
        """
        Change orientation on image (in-place).

        :param orientation: orientation string (SCT "from" convention)
        :param inverse: if you think backwards, use this to specify that you actually\
                        want to transform *from* the specified orientation, not *to*\
                        it.
        """
        change_orientation(self, orientation, self, inverse=inverse)
        return self

    def change_type(self, dtype):
        """
        Change data type on image.

        Note: the image path is voided.
        """
        change_type(self, dtype, self)
        return self

    def save(self, path=None, dtype=None, verbose=1, mutable=False):
        """
        Write an image in a nifti file

        :param path: Where to save the data, if None it will be taken from the\
                     absolutepath member.\
                     If path is a directory, will save to a file under this directory\
                     with the basename from the absolutepath member.
        :param dtype: if not set, the image is saved in the same type as input data\
                      if 'minimize', image storage space is minimized\
                      (2, 'uint8', np.uint8, "NIFTI_TYPE_UINT8"),\
                      (4, 'int16', np.int16, "NIFTI_TYPE_INT16"),\
                      (8, 'int32', np.int32, "NIFTI_TYPE_INT32"),\
                      (16, 'float32', np.float32, "NIFTI_TYPE_FLOAT32"),\
                      (32, 'complex64', np.complex64, "NIFTI_TYPE_COMPLEX64"),\
                      (64, 'float64', np.float64, "NIFTI_TYPE_FLOAT64"),\
                      (256, 'int8', np.int8, "NIFTI_TYPE_INT8"),\
                      (512, 'uint16', np.uint16, "NIFTI_TYPE_UINT16"),\
                      (768, 'uint32', np.uint32, "NIFTI_TYPE_UINT32"),\
                      (1024,'int64', np.int64, "NIFTI_TYPE_INT64"),\
                      (1280, 'uint64', np.uint64, "NIFTI_TYPE_UINT64"),\
                      (1536, 'float128', _float128t, "NIFTI_TYPE_FLOAT128"),\
                      (1792, 'complex128', np.complex128, "NIFTI_TYPE_COMPLEX128"),\
                      (2048, 'complex256', _complex256t, "NIFTI_TYPE_COMPLEX256"),
        :param mutable: whether to update members with newly created path or dtype
        """
        if path is None and self.absolutepath is None:
            raise RuntimeError("Don't know where to save the image (no absolutepath or path parameter)")
        elif path is not None and os.path.isdir(path) and self.absolutepath is not None:
            # Save to destination directory with original basename
            path = os.path.join(os.path.abspath(path), os.path.basename(self.absolutepath))

        path = path or self.absolutepath

        # Convert on a copy so `self.data` keeps its original dtype (unless mutable=True below).
        if dtype is not None:
            dst = self.copy()
            dst.change_type(dtype)
            data = dst.data
        else:
            data = self.data

        # update header
        hdr = self.hdr.copy() if self.hdr else None
        if hdr:
            hdr.set_data_shape(data.shape)
            # Update dtype if provided (but not if based on SCT-specific values: 'minimize')
            if (dtype is not None) and (dtype not in ['minimize', 'minimize_int']):
                hdr.set_data_dtype(dtype)

        # nb. that copy() is important because if it were a memory map, save()
        # would corrupt it
        img = nib.nifti1.Nifti1Image(data.copy(), None, hdr)

        if os.path.isfile(path):
            if verbose:
                logger.warning('File ' + path + ' already exists. Will overwrite it.')

        # save file
        if os.path.isabs(path):
            logger.debug("Saving image to %s orientation %s shape %s",
                         path, self.orientation, data.shape)
        else:
            logger.debug("Saving image to %s (%s) orientation %s shape %s",
                         path, os.path.abspath(path), self.orientation, data.shape)

        nib.save(img, path)

        # Optionally adopt the new path/data (dtype) on this instance.
        if mutable:
            self.absolutepath = path
            self.data = data

        if not os.path.isfile(path):
            raise RuntimeError("Couldn't save {}".format(path))

        return self

    def getNonZeroCoordinates(self, sorting=None, reverse_coord=False, coordValue=False):
        """
        This function return all the non-zero coordinates that the image contains.
        Coordinate list can also be sorted by x, y, z, or the value with the parameter sorting='x', sorting='y', sorting='z' or sorting='value'
        If reverse_coord is True, coordinate are sorted from larger to smaller.
        """
        # Determine effective dimensionality from the header dims.
        # NOTE(review): when dim[3] != 1 AND dim[2] != 1, n_dim stays 4 and neither
        # branch below assigns X/list_coordinates, which would raise NameError —
        # confirm that 4D inputs are never passed here.
        n_dim = 1
        if self.dim[3] == 1:
            n_dim = 3
        else:
            n_dim = 4
        if self.dim[2] == 1:
            n_dim = 2

        if n_dim == 3:
            X, Y, Z = (self.data > 0).nonzero()
            list_coordinates = [Coordinate([X[i], Y[i], Z[i], self.data[X[i], Y[i], Z[i]]]) for i in range(0, len(X))]
        elif n_dim == 2:
            # The data may still be stored as 3D with a trailing singleton axis,
            # in which case indexing with two subscripts raises ValueError.
            try:
                X, Y = (self.data > 0).nonzero()
                list_coordinates = [Coordinate([X[i], Y[i], 0, self.data[X[i], Y[i]]]) for i in range(0, len(X))]
            except ValueError:
                X, Y, Z = (self.data > 0).nonzero()
                list_coordinates = [Coordinate([X[i], Y[i], 0, self.data[X[i], Y[i], 0]]) for i in range(0, len(X))]

        if coordValue:
            from spinalcordtoolbox.types import CoordinateValue
            if n_dim == 3:
                list_coordinates = [CoordinateValue([X[i], Y[i], Z[i], self.data[X[i], Y[i], Z[i]]]) for i in range(0, len(X))]
            else:
                list_coordinates = [CoordinateValue([X[i], Y[i], 0, self.data[X[i], Y[i]]]) for i in range(0, len(X))]

        if sorting is not None:
            if reverse_coord not in [True, False]:
                raise ValueError('reverse_coord parameter must be a boolean')

            if sorting == 'x':
                list_coordinates = sorted(list_coordinates, key=lambda obj: obj.x, reverse=reverse_coord)
            elif sorting == 'y':
                list_coordinates = sorted(list_coordinates, key=lambda obj: obj.y, reverse=reverse_coord)
            elif sorting == 'z':
                list_coordinates = sorted(list_coordinates, key=lambda obj: obj.z, reverse=reverse_coord)
            elif sorting == 'value':
                list_coordinates = sorted(list_coordinates, key=lambda obj: obj.value, reverse=reverse_coord)
            else:
                raise ValueError("sorting parameter must be either 'x', 'y', 'z' or 'value'")

        return list_coordinates

    def getCoordinatesAveragedByValue(self):
        """
        This function computes the mean coordinate of group of labels in the image. This is especially useful for label's images.

        :return: list of coordinates that represent the center of mass of each group of value.
        """
        # 1. Extraction of coordinates from all non-null voxels in the image. Coordinates are sorted by value.
        coordinates = self.getNonZeroCoordinates(sorting='value')

        # 2. Separate all coordinates into groups by value
        groups = dict()
        for coord in coordinates:
            if coord.value in groups:
                groups[coord.value].append(coord)
            else:
                groups[coord.value] = [coord]

        # 3. Compute the center of mass of each group of voxels and write them into the output image
        averaged_coordinates = []
        for value, list_coord in groups.items():
            averaged_coordinates.append(sum(list_coord) / float(len(list_coord)))

        averaged_coordinates = sorted(averaged_coordinates, key=lambda obj: obj.value, reverse=False)
        return averaged_coordinates

    def transfo_pix2phys(self, coordi=None):
        """
        This function returns the physical coordinates of all points of 'coordi'.

        :param coordi: sequence of (nb_points x 3) values containing the pixel coordinate of points.
        :return: sequence with the physical coordinates of the points in the space of the image.

        Example:

        .. code:: python

            img = Image('file.nii.gz')
            coordi_pix = [[1,1,1]]   # for points: (1,1,1). N.B. Important to write [[x,y,z]] instead of [x,y,z]
            coordi_pix = [[1,1,1],[2,2,2],[4,4,4]]   # for points: (1,1,1), (2,2,2) and (4,4,4)
            coordi_phys = img.transfo_pix2phys(coordi=coordi_pix)

        """
        m_p2f = self.hdr.get_best_affine()
        # Augment coordinates with a 1 so the 4x4 affine applies in one product.
        aug = np.hstack((np.asarray(coordi), np.ones((len(coordi), 1))))
        ret = np.empty_like(coordi, dtype=np.float64)
        for idx_coord, coord in enumerate(aug):
            phys = np.matmul(m_p2f, coord)
            ret[idx_coord] = phys[:3]
        return ret

    def transfo_phys2pix(self, coordi, real=True):
        """
        This function returns the pixels coordinates of all points of 'coordi'

        :param coordi: sequence of (nb_points x 3) values containing the physical coordinate of points.
        :param real: if True, round the result to the nearest integer voxel indices;
                     if False, keep continuous (float) pixel coordinates.
        :return: sequence with the pixel coordinates of the points in the space of the image.
        """
        m_p2f = self.hdr.get_best_affine()
        m_f2p = np.linalg.inv(m_p2f)
        aug = np.hstack((np.asarray(coordi), np.ones((len(coordi), 1))))
        ret = np.empty_like(coordi, dtype=np.float64)
        for idx_coord, coord in enumerate(aug):
            phys = np.matmul(m_f2p, coord)
            ret[idx_coord] = phys[:3]
        if real:
            return np.int32(np.round(ret))
        else:
            return ret

    def get_values(self, coordi=None, interpolation_mode=0, border='constant', cval=0.0):
        """
        This function returns the intensity value of the image at the position coordi (can be a list of coordinates).

        :param coordi: continuouspix
        :param interpolation_mode: 0=nearest neighbor, 1= linear, 2= 2nd-order spline, 3= 2nd-order spline, 4= 2nd-order spline, 5= 5th-order spline
        :return: intensity values at continuouspix with interpolation_mode
        """
        return map_coordinates(self.data, coordi, output=np.float32, order=interpolation_mode, mode=border, cval=cval)

    def get_transform(self, im_ref, mode='affine'):
        """
        Compute the transform from this image's voxel space to the reference image's,
        restricted to the requested mode ('affine', 'translation', 'rigid', 'rigid_scaling').
        Any other mode yields the identity matrix.
        """
        aff_im_self = self.im_file.affine
        aff_im_ref = im_ref.im_file.affine
        # NOTE(review): this unconditional assignment is redundant — the 'affine'
        # branch below recomputes the same product.
        transform = np.matmul(np.linalg.inv(aff_im_self), aff_im_ref)
        if mode == 'affine':
            transform = np.matmul(np.linalg.inv(aff_im_self), aff_im_ref)
        else:
            T_self, R_self, Sc_self, Sh_self = affines.decompose44(aff_im_self)
            T_ref, R_ref, Sc_ref, Sh_ref = affines.decompose44(aff_im_ref)
            if mode == 'translation':
                T_transform = T_ref - T_self
                R_transform = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
                Sc_transform = np.array([1.0, 1.0, 1.0])
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            elif mode == 'rigid':
                T_transform = T_ref - T_self
                R_transform = np.matmul(np.linalg.inv(R_self), R_ref)
                Sc_transform = np.array([1.0, 1.0, 1.0])
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            elif mode == 'rigid_scaling':
                T_transform = T_ref - T_self
                R_transform = np.matmul(np.linalg.inv(R_self), R_ref)
                Sc_transform = Sc_ref / Sc_self
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            else:
                transform = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        return transform

    def get_inverse_transform(self, im_ref, mode='affine'):
        """
        Compute the inverse of get_transform() for the same mode: the transform from
        the reference image's voxel space back to this image's.
        """
        aff_im_self = self.im_file.affine
        aff_im_ref = im_ref.im_file.affine
        if mode == 'affine':
            transform = np.matmul(np.linalg.inv(aff_im_ref), aff_im_self)
        else:
            T_self, R_self, Sc_self, Sh_self = affines.decompose44(aff_im_self)
            T_ref, R_ref, Sc_ref, Sh_ref = affines.decompose44(aff_im_ref)
            if mode == 'translation':
                T_transform = T_self - T_ref
                R_transform = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
                Sc_transform = np.array([1.0, 1.0, 1.0])
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            elif mode == 'rigid':
                T_transform = T_self - T_ref
                R_transform = np.matmul(np.linalg.inv(R_ref), R_self)
                Sc_transform = np.array([1.0, 1.0, 1.0])
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            elif mode == 'rigid_scaling':
                T_transform = T_self - T_ref
                R_transform = np.matmul(np.linalg.inv(R_ref), R_self)
                Sc_transform = Sc_self / Sc_ref
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            else:
                transform = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        return transform

    def get_directions(self):
        """
        This function return the X, Y, and Z axes of the image

        :return: X, Y and Z axes of the image
        """
        direction_matrix = self.header.get_best_affine()
        T_self, R_self, Sc_self, Sh_self = affines.decompose44(direction_matrix)
        return R_self[0:3, 0], R_self[0:3, 1], R_self[0:3, 2]

    def interpolate_from_image(self, im_ref, fname_output=None, interpolation_mode=1, border='constant'):
        """
        This function interpolates an image by following the grid of a reference image.
        Example of use:

        .. code:: python

            from spinalcordtoolbox.image import Image
            im_input = Image(fname_input)
            im_ref = Image(fname_ref)
            im_input.interpolate_from_image(im_ref, fname_output, interpolation_mode=1)

        :param im_ref: reference Image that contains the grid on which interpolate.
        :param border: Points outside the boundaries of the input are filled according\
                       to the given mode ('constant', 'nearest', 'reflect' or 'wrap')
        :return: a new image that has the same dimensions/grid of the reference image but the data of self image.
        """
        nx, ny, nz, nt, px, py, pz, pt = im_ref.dim
        x, y, z = np.mgrid[0:nx, 0:ny, 0:nz]
        indexes_ref = np.array(list(zip(x.ravel(), y.ravel(), z.ravel())))
        physical_coordinates_ref = im_ref.transfo_pix2phys(indexes_ref)

        # TODO: add optional transformation from reference space to image space to physical coordinates of ref grid.
        # TODO: add choice to do non-full transorm: translation, (rigid), affine
        # 1. get transformation
        # 2. apply transformation on coordinates

        # Map the reference grid into this image's continuous voxel space, then sample.
        coord_im = self.transfo_phys2pix(physical_coordinates_ref, real=False)
        interpolated_values = self.get_values(np.array([coord_im[:, 0], coord_im[:, 1], coord_im[:, 2]]), interpolation_mode=interpolation_mode, border=border)

        im_output = Image(im_ref)
        if interpolation_mode == 0:
            im_output.change_type('int32')
        else:
            im_output.change_type('float32')
        im_output.data = np.reshape(interpolated_values, (nx, ny, nz))
        if fname_output is not None:
            im_output.absolutepath = fname_output
            im_output.save()
        return im_output

    def mean(self, dim):
        """
        Average across specified dimension

        :param dim: int: axis used for averaging
        :return: Image object
        """
        im_out = empty_like(self)
        im_out.data = np.mean(self.data, dim)
        # TODO: the line below fails because .dim is immutable. We should find a solution to update dim accordingly
        #  because as of now, this field contains wrong values (in this case, the dimension should be changed)
        # im_out.dim = im_out.data.shape[:dim] + (1,) + im_out.data.shape[dim:]
        return im_out
def compute_dice(image1, image2, mode='3d', label=1, zboundaries=False):
    """
    This function computes the Dice coefficient between two binary images.

    :param image1: object Image
    :param image2: object Image
    :param mode: mode of computation of Dice.\
            3d: compute Dice coefficient over the full 3D volume\
            2d-slices: compute the 2D Dice coefficient for each slice of the volumes\
    :param: label: binary label for which Dice coefficient will be computed. Default=1
    :param: zboundaries: True/False. If True, the Dice coefficient is computed over a Z-ROI where both segmentations are\
            present. Default=False.
    :return: Dice coefficient as a float between 0 and 1. Raises ValueError exception if an error occurred.
    """
    MODES = ['3d', '2d-slices']
    if mode not in MODES:
        raise ValueError('\n\nERROR: mode must be one of these values:' + ', '.join(MODES))

    dice = 0.0  # default value of dice is 0

    # check if images are in the same coordinate system
    assert image1.data.shape == image2.data.shape, "\n\nERROR: the data (" + image1.absolutepath + " and " + image2.absolutepath + ") don't have the same size.\nPlease use  \"sct_register_multimodal -i im1.nii.gz -d im2.nii.gz -identity 1\" to put the input images in the same space"

    # if necessary, change orientation of images to RPI and compute segmentation boundaries
    if mode == '2d-slices' or (mode == '3d' and zboundaries):
        # changing orientation to RPI if necessary
        if image1.orientation != 'RPI':
            image1 = change_orientation(image1, "RPI")
        if image2.orientation != 'RPI':
            image2 = change_orientation(image2, "RPI")

        zmin, zmax = 0, image1.data.shape[2] - 1
        if zboundaries:
            # compute Z-ROI for which both segmentations are present.
            for z in range(zmin, zmax + 1):  # going from inferior to superior
                if np.any(image1.data[:, :, z]) and np.any(image2.data[:, :, z]):
                    zmin = z
                    break
            # NOTE(review): range(zmax, zmin + 1, -1) stops at zmin + 2, so the
            # slices at zmin and zmin + 1 are never tested on the way down —
            # confirm whether the descending scan should reach zmin.
            for z in range(zmax, zmin + 1, -1):  # going from superior to inferior
                if np.any(image1.data[:, :, z]) and np.any(image2.data[:, :, z]):
                    zmax = z
                    break

        if zmin > zmax:
            # segmentations do not overlap
            return 0.0

        if mode == '3d':
            # compute dice coefficient over Z-ROI
            # NOTE(review): the slice zmin:zmax excludes the zmax slice even
            # though it was detected as containing both segmentations —
            # looks like it should be zmin:zmax + 1; confirm before changing
            # (would alter published numeric results).
            data1 = image1.data[:, :, zmin:zmax]
            data2 = image2.data[:, :, zmin:zmax]

            dice = np.sum(data2[data1 == label]) * 2.0 / (np.sum(data1) + np.sum(data2))

    elif mode == '2d-slices':
        raise ValueError('2D slices Dice coefficient feature is not implemented yet')

    elif mode == '3d':
        # compute 3d dice coefficient
        dice = np.sum(image2.data[image1.data == label]) * 2.0 / (np.sum(image1.data) + np.sum(image2.data))

    return dice
def concat_data(im_in_list: Sequence[Image], dim, pixdim=None, squeeze_data=False):
    """
    Concatenate data

    :param im_in_list: list of Images
    :param dim: dimension: 0, 1, 2, 3.
    :param pixdim: pixel resolution to join to image header
    :param squeeze_data: bool: if True, remove the last dim if it is a singleton.
    :return im_out: concatenated image
    """
    # WARNING: calling concat_data in python instead of in command line causes a non-understood issue
    # (results are different with both options).
    dat_list = []
    data_concat_list = []

    for i, im in enumerate(im_in_list):
        # if there is more than 100 images to concatenate, then it does it iteratively to avoid memory issue.
        # (every 100 images, the accumulated list is concatenated into one array and the list is reset)
        if i != 0 and i % 100 == 0:
            data_concat_list.append(np.concatenate(dat_list, axis=dim))
            dat = im.data
            # if image shape is smaller than asked dim, then expand dim
            if len(dat.shape) <= dim:
                dat = np.expand_dims(dat, dim)
            dat_list = [dat]
            del im
            del dat
        else:
            dat = im.data
            # if image shape is smaller than asked dim, then expand dim
            if len(dat.shape) <= dim:
                dat = np.expand_dims(dat, dim)
            dat_list.append(dat)
            del im
            del dat

    # Merge the per-chunk arrays (if chunking happened) with the remaining tail.
    if data_concat_list:
        data_concat_list.append(np.concatenate(dat_list, axis=dim))
        data_concat = np.concatenate(data_concat_list, axis=dim)
    else:
        data_concat = np.concatenate(dat_list, axis=dim)

    im_in_first = im_in_list[0]
    im_out = empty_like(im_in_first)  # NB: empty_like reuses the header from the first input image for im_out
    if im_in_first.absolutepath is not None:
        im_out.absolutepath = add_suffix(im_in_first.absolutepath, '_concat')

    if pixdim is not None:
        im_out.hdr['pixdim'] = pixdim

    if squeeze_data and data_concat.shape[dim] == 1:
        # remove the last dim if it is a singleton.
        im_out.data = data_concat.reshape(
            tuple([x for (idx_shape, x) in enumerate(data_concat.shape) if idx_shape != dim]))
    else:
        im_out.data = data_concat

    # TODO: the line below fails because .dim is immutable. We should find a solution to update dim accordingly
    #  because as of now, this field contains wrong values (in this case, the dimension should be changed). Also
    #  see mean()
    # im_out.dim = im_out.data.shape[:dim] + (1,) + im_out.data.shape[dim:]

    return im_out
def find_zmin_zmax(im, threshold=0.1):
    """
    Find the min (and max) z-slice index below which (and above which) slices only have voxels below a given threshold.

    :param im: Image object
    :param threshold: threshold to apply before looking for zmin/zmax, typically corresponding to noise level.
    :return: (zmin, zmax)
    """
    slicer = SlicerOneAxis(im, axis="IS")

    # Make sure image is not empty
    # NOTE(review): np.any(slicer) relies on numpy materializing the slicer via
    # its sequence protocol — confirm this works as intended for large volumes.
    if not np.any(slicer):
        logger.error('Input image is empty')

    # Iterate from bottom to top until we find data
    for zmin in range(0, len(slicer)):
        if np.any(slicer[zmin] > threshold):
            break

    # Conversely from top to bottom
    # (removed an unused `dataz = slicer[zmax]` local that fetched each slice twice)
    for zmax in range(len(slicer) - 1, zmin, -1):
        if np.any(slicer[zmax] > threshold):
            break

    return zmin, zmax
def get_dimension(im_file, verbose=1):
    """
    Get dimension from Image or nibabel object. Manages 2D, 3D or 4D images.

    :param im_file: Image or nibabel object
    :param verbose: unused; kept for backward compatibility.
    :return: nx, ny, nz, nt, px, py, pz, pt
    """
    # Default every dimension/resolution to 1 so 2D/3D images still yield a full 8-tuple.
    nx, ny, nz, nt, px, py, pz, pt = 1, 1, 1, 1, 1, 1, 1, 1
    # Use isinstance() (rather than a strict `type(x) is T` comparison) so that
    # subclasses of either supported type are accepted as well.
    if isinstance(im_file, nib.nifti1.Nifti1Image):
        header = im_file.header
    elif isinstance(im_file, Image):
        header = im_file.hdr
    else:
        header = None
        logger.warning("The provided image file is neither a nibabel.nifti1.Nifti1Image instance nor an Image instance")
    nb_dims = len(header.get_data_shape())
    if nb_dims == 2:
        nx, ny = header.get_data_shape()
        px, py = header.get_zooms()
    if nb_dims == 3:
        nx, ny, nz = header.get_data_shape()
        px, py, pz = header.get_zooms()
    if nb_dims == 4:
        nx, ny, nz, nt = header.get_data_shape()
        px, py, pz, pt = header.get_zooms()
    return nx, ny, nz, nt, px, py, pz, pt
def all_refspace_strings():
    """
    :return: all possible orientation strings ['RAI', 'RAS', 'RPI', 'RPS', ...]
    """
    axes = ("RL", "AP", "IS")
    codes = []
    # For every ordering of the three anatomical axes, emit every combination
    # of one letter per axis (3! orderings x 2^3 letter choices = 48 strings).
    for ordering in itertools.permutations(axes, 3):
        for letters in itertools.product(*ordering):
            codes.append("".join(letters))
    return codes
def get_orientation(im):
    """
    :param im: an Image
    :return: reference space string (ie. what's in Image.orientation)
    """
    # nibabel reports axis codes in its own convention; convert the joined
    # three-letter code to the SCT convention before returning.
    res = "".join(nib.orientations.aff2axcodes(im.hdr.get_best_affine()))
    return orientation_string_nib2sct(res)
    # (removed an unreachable `return res` statement that followed the return above)
def orientation_string_nib2sct(s):
    """
    :return: SCT reference space code from nibabel one
    """
    # Each axis letter maps to its anatomical opposite. The mapping is its own
    # inverse, so the very same function converts in both directions.
    flip = {'L': 'R', 'R': 'L', 'A': 'P', 'P': 'A', 'I': 'S', 'S': 'I'}
    return "".join(flip[letter] for letter in s)
orientation_string_sct2nib = orientation_string_nib2sct
def change_shape(im_src, shape, im_dst=None):
    """
    :param im_src: source image whose data is reshaped.
    :param shape: shape to obtain (must be compatible with original one)
    :param im_dst: destination image (may be im_src for in-place operation); a
                   path-less copy of im_src is created when not provided.
    :return: an image with changed shape

    .. note::
        The resulting image has no path
    """
    if im_dst is None:
        im_dst = im_src.copy()
        im_dst._path = None

    if im_src.data.flags.f_contiguous:
        im_dst.data = im_src.data.reshape(shape, order="F")
    elif im_src.data.flags.c_contiguous:
        warnings.warn("Encountered an array with C order, strange!")
        im_dst.data = im_src.data.reshape(shape, order="C")
    else:
        # image data may be a view (neither F- nor C-contiguous); copy first so
        # the reshape cannot fail on non-contiguous memory.
        # Bug fix: the reshaped array was previously bound to a local variable
        # (`im_dst_data`) and silently discarded, leaving im_dst.data unchanged.
        im_dst.data = im_src.data.copy().reshape(shape, order="F")

    # Rebuild the header around the new data shape while keeping the affine.
    pair = nib.nifti1.Nifti1Pair(im_dst.data, im_dst.hdr.get_best_affine(), im_dst.hdr)
    im_dst.hdr = pair.header
    return im_dst
def change_orientation(im_src, orientation, im_dst=None, inverse=False, data_only=False):
    """
    :param im_src: source image
    :param orientation: orientation string (SCT "from" convention)
    :param im_dst: destination image (can be the source image for in-place
                   operation, can be unset to generate one)
    :param inverse: if you think backwards, use this to specify that you actually
                    want to transform *from* the specified orientation, not *to* it.
    :param data_only: If you want to only permute the data, not the header. Only use if you know there is a problem
                      with the native orientation of the input data.
    :return: an image with changed orientation

    .. note::
        - the resulting image has no path member set
        - if the source image is < 3D, it is reshaped to 3D and the destination is 3D
    """
    # TODO: make sure to cover all cases for setorient-data
    if len(im_src.data.shape) < 3:
        pass  # Will reshape to 3D
    elif len(im_src.data.shape) == 3:
        pass  # OK, standard 3D volume
    elif len(im_src.data.shape) == 4:
        pass  # OK, standard 4D volume
    elif len(im_src.data.shape) == 5 and im_src.header.get_intent()[0] == "vector":
        pass  # OK, physical displacement field
    else:
        raise NotImplementedError("Don't know how to change orientation for this image")

    im_src_orientation = im_src.orientation
    im_dst_orientation = orientation
    # `inverse` simply swaps source and destination orientations.
    if inverse:
        im_src_orientation, im_dst_orientation = im_dst_orientation, im_src_orientation

    # perm is the axis permutation, inversion holds +1/-1 per axis (flip markers).
    perm, inversion = _get_permutations(im_src_orientation, im_dst_orientation)

    if im_dst is None:
        im_dst = im_src.copy()
        im_dst._path = None

    im_src_data = im_src.data
    # Pad sub-3D data with trailing singleton axes so the 3-axis permutation applies.
    if len(im_src_data.shape) < 3:
        im_src_data = im_src_data.reshape(tuple(list(im_src_data.shape) + ([1] * (3 - len(im_src_data.shape)))))

    # Update data by performing inversions and swaps

    # axes inversion (flip)
    data = im_src_data[::inversion[0], ::inversion[1], ::inversion[2]]

    # axes manipulations (transpose)
    # Each 3-axis permutation is realized as one or two pairwise swaps.
    if perm == [1, 0, 2]:
        data = np.swapaxes(data, 0, 1)
    elif perm == [2, 1, 0]:
        data = np.swapaxes(data, 0, 2)
    elif perm == [0, 2, 1]:
        data = np.swapaxes(data, 1, 2)
    elif perm == [2, 0, 1]:
        data = np.swapaxes(data, 0, 2)  # transform [2, 0, 1] to [1, 0, 2]
        data = np.swapaxes(data, 0, 1)  # transform [1, 0, 2] to [0, 1, 2]
    elif perm == [1, 2, 0]:
        data = np.swapaxes(data, 0, 2)  # transform [1, 2, 0] to [0, 2, 1]
        data = np.swapaxes(data, 1, 2)  # transform [0, 2, 1] to [0, 1, 2]
    elif perm == [0, 1, 2]:
        # do nothing
        pass
    else:
        raise NotImplementedError()

    # Update header
    # Compose the source affine with the inverse orientation transform so that
    # physical coordinates are preserved under the voxel permutation/flips.
    im_src_aff = im_src.hdr.get_best_affine()
    aff = nib.orientations.inv_ornt_aff(
        np.array((perm, inversion)).T,
        im_src_data.shape)
    im_dst_aff = np.matmul(im_src_aff, aff)

    if not data_only:
        im_dst.header.set_qform(im_dst_aff)
        im_dst.header.set_sform(im_dst_aff)
        im_dst.header.set_data_shape(data.shape)
    im_dst.data = data

    return im_dst
def change_type(im_src, dtype, im_dst=None):
    """
    Change the voxel type of the image

    :param im_src: source image.
    :param dtype: if not set, the image is saved in standard type\
                  if 'minimize', image space is minimize\
                  if 'minimize_int', image space is minimize and values are approximated to integers\
                  (2, 'uint8', np.uint8, "NIFTI_TYPE_UINT8"),\
                  (4, 'int16', np.int16, "NIFTI_TYPE_INT16"),\
                  (8, 'int32', np.int32, "NIFTI_TYPE_INT32"),\
                  (16, 'float32', np.float32, "NIFTI_TYPE_FLOAT32"),\
                  (32, 'complex64', np.complex64, "NIFTI_TYPE_COMPLEX64"),\
                  (64, 'float64', np.float64, "NIFTI_TYPE_FLOAT64"),\
                  (256, 'int8', np.int8, "NIFTI_TYPE_INT8"),\
                  (512, 'uint16', np.uint16, "NIFTI_TYPE_UINT16"),\
                  (768, 'uint32', np.uint32, "NIFTI_TYPE_UINT32"),\
                  (1024,'int64', np.int64, "NIFTI_TYPE_INT64"),\
                  (1280, 'uint64', np.uint64, "NIFTI_TYPE_UINT64"),\
                  (1536, 'float128', _float128t, "NIFTI_TYPE_FLOAT128"),\
                  (1792, 'complex128', np.complex128, "NIFTI_TYPE_COMPLEX128"),\
                  (2048, 'complex256', _complex256t, "NIFTI_TYPE_COMPLEX256"),
    :param im_dst: destination image (a path-less copy of im_src is created when not provided).
    :return: the destination image, with converted data and header dtype.
    """
    if im_dst is None:
        im_dst = im_src.copy()
        im_dst._path = None

    if dtype is None:
        return im_dst

    # get min/max from input image
    min_in = np.nanmin(im_src.data)
    max_in = np.nanmax(im_src.data)

    # find optimum type for the input image
    if dtype in ('minimize', 'minimize_int'):
        # warning: does not take intensity resolution into account, neither complex voxels

        # check if voxel values are real or integer
        # ('minimize_int' assumes integer content without scanning the data)
        isInteger = True
        if dtype == 'minimize':
            for vox in im_src.data.flatten():
                if int(vox) != vox:
                    isInteger = False
                    break

        if isInteger:
            if min_in >= 0:  # unsigned
                if max_in <= np.iinfo(np.uint8).max:
                    dtype = np.uint8
                # Bug fix: `.max` was missing here, so max_in was compared
                # against the np.iinfo object itself instead of its maximum.
                elif max_in <= np.iinfo(np.uint16).max:
                    dtype = np.uint16
                elif max_in <= np.iinfo(np.uint32).max:
                    dtype = np.uint32
                elif max_in <= np.iinfo(np.uint64).max:
                    dtype = np.uint64
                else:
                    raise ValueError("Maximum value of the image is to big to be represented.")
            else:
                if max_in <= np.iinfo(np.int8).max and min_in >= np.iinfo(np.int8).min:
                    dtype = np.int8
                elif max_in <= np.iinfo(np.int16).max and min_in >= np.iinfo(np.int16).min:
                    dtype = np.int16
                elif max_in <= np.iinfo(np.int32).max and min_in >= np.iinfo(np.int32).min:
                    dtype = np.int32
                elif max_in <= np.iinfo(np.int64).max and min_in >= np.iinfo(np.int64).min:
                    dtype = np.int64
                else:
                    raise ValueError("Maximum value of the image is to big to be represented.")
        else:
            # if max_in <= np.finfo(np.float16).max and min_in >= np.finfo(np.float16).min:
            #    type = 'np.float16' # not supported by nibabel
            if max_in <= np.finfo(np.float32).max and min_in >= np.finfo(np.float32).min:
                dtype = np.float32
            elif max_in <= np.finfo(np.float64).max and min_in >= np.finfo(np.float64).min:
                dtype = np.float64

        dtype = to_dtype(dtype)
    else:
        dtype = to_dtype(dtype)

        # if output type is int, check if it needs intensity rescaling
        if "int" in dtype.name:
            # get min/max from output type
            min_out = np.iinfo(dtype).min
            max_out = np.iinfo(dtype).max
            # before rescaling, check if there would be an intensity overflow
            if (min_in < min_out) or (max_in > max_out):
                # This condition is important for binary images since we do not want to scale them
                logger.warning(f"To avoid intensity overflow due to convertion to +{dtype.name}+, intensity will be rescaled to the maximum quantization scale")
                # rescale intensity
                data_rescaled = im_src.data * (max_out - min_out) / (max_in - min_in)
                im_dst.data = data_rescaled - (data_rescaled.min() - min_out)

    # change type of data in both numpy array and nifti header
    im_dst.data = getattr(np, dtype.name)(im_dst.data)
    im_dst.hdr.set_data_dtype(dtype)
    return im_dst
def to_dtype(dtype):
    """
    Take a dtype specification and return an ``np.dtype``.

    :param dtype: dtype specification. Supported: None, string (e.g.
                  ``'uint8'``), ``np.dtype`` instance, or a numpy scalar
                  type such as ``np.uint8``.
    :return: ``np.dtype`` or None
    :raises TypeError: if the specification is not understood.
    """
    # TODO add more or filter on things supported by nibabel
    if dtype is None:
        return None
    if isinstance(dtype, np.dtype):
        return dtype
    if isinstance(dtype, str):
        return np.dtype(dtype)
    if isinstance(dtype, type):
        # Instantiate a zero of the scalar type to recover its dtype; this
        # works for numpy scalar types (np.uint8, np.float32, ...). A plain
        # Python type such as `int` raises AttributeError here, exactly as
        # the original `try: ... except: raise` (a no-op wrapper, removed)
        # let it propagate.
        if isinstance(dtype(0).dtype, np.dtype):
            return dtype(0).dtype
    raise TypeError("data type {}: {} not understood".format(dtype.__class__, dtype))
def zeros_like(img, dtype=None):
    """
    Return an Image with the same shape and header as *img*, filled with zeros.

    :param img: reference image
    :param dtype: desired data type (optional)
    :return: a zero-filled Image

    Similar to numpy.zeros_like(): makes the developer's intent explicit and
    avoids a data copy, which is slower than filling with a constant.
    """
    out = change_type(img, dtype)
    out.data[:] = 0
    return out
def empty_like(img, dtype=None):
    """
    Return an Image with the same shape and header as *img*, leaving its
    pixel data uninitialized.

    :param img: reference image
    :param dtype: desired data type (optional)
    :return: an Image whose data contents are unspecified

    Similar to numpy.empty_like(): makes the developer's intent explicit and
    avoids touching the allocated memory, because the caller will overwrite
    it afterwards.
    """
    return change_type(img, dtype)
def spatial_crop(im_src, spec, im_dst=None):
    """
    Crop an image in {0,1,2} dimension(s),
    properly altering the header to not change the physical-logical correspondence.

    :param im_src: source Image to crop.
    :param spec: dict of dim -> [lo,hi] bounds (integer voxel coordinates, inclusive)
    :param im_dst: optional destination Image; if None, a copy of im_src is used.
    :return: the cropped Image.
    """
    # Compute bounds: start with the full extent, then narrow the requested dims.
    bounds = [(0, x - 1) for x in im_src.data.shape]
    for k, v in spec.items():
        bounds[k] = v
    # Inclusive [lo, hi] voxel bounds -> half-open slices.
    bounds_ndslice = tuple([slice(a, b + 1) for (a, b) in bounds])
    bounds = np.array(bounds)
    # Crop data
    new_data = im_src.data[bounds_ndslice]
    # Update header
    #
    # Ref: https://mail.python.org/pipermail/neuroimaging/2017-August/001501.html
    # Given A, we want to find A' that is identical up to the intercept, such
    # that A * [x_0, y_0, z_0, 1]' == A' * [0, 0, 0, 1].
    # Conveniently, A' * [0, 0, 0, 1]' is the fourth row in the affine matrix, so
    # we're done as soon as we calculate the LHS:
    aff = im_src.header.get_best_affine()
    new_aff = aff.copy()
    # Shift the translation column so voxel (0,0,0) of the cropped image maps
    # to the same physical point as voxel (lo_x, lo_y, lo_z) did in the source.
    new_aff[:, [3]] = aff.dot(np.vstack((bounds[:, [0]], [1])))
    new_img = nib.nifti1.Nifti1Image(new_data, new_aff, im_src.header)
    if im_dst is None:
        im_dst = im_src.copy()
    # Adopt the recomputed header/affine and the cropped data.
    im_dst.header = new_img.header
    im_dst.data = new_data
    return im_dst
def convert(img: Image, squeeze_data=True, dtype=None):
    """
    Optionally squeeze singleton dimensions and/or cast the image dtype.

    :param img: Image to modify in place.
    :param squeeze_data: if True, remove singleton dimensions from img.data.
    :param dtype: if truthy, target dtype passed to img.change_type().
    :return: the same (mutated) Image, for call chaining.
    """
    if squeeze_data:
        img.data = np.squeeze(img.data)
    if dtype:
        img.change_type(dtype)
    return img
def split_img_data(src_img: Image, dim, squeeze_data=True):
    """
    Split an image into a list of single-slab images along one axis.

    :param src_img: input image.
    :param dim: dimension: 0, 1, 2, 3.
    :param squeeze_data: if True, drop the split axis when it is the last one
                         (avoids trailing singleton dimensions).
    :return: list of split images
    """
    axis_names = ['x', 'y', 'z', 't']
    data = src_img.data
    # A 3D volume split along t needs an explicit 4th axis first.
    if dim + 1 > len(np.shape(data)):
        data = data[..., np.newaxis]
    # Only drop the singleton axis when splitting along the last dimension
    # and the caller asked for squeezing.
    do_reshape = squeeze_data and (dim + 1 == len(np.shape(data)))
    # Target shape with the split axis removed (same for every slab).
    squeezed_shape = tuple(size for (axis, size) in enumerate(data.shape) if axis != dim)
    images = []
    for slab_idx, slab in enumerate(np.array_split(data, data.shape[dim], dim)):
        im_out = empty_like(src_img)
        im_out.data = slab.reshape(squeezed_shape) if do_reshape else slab
        im_out.absolutepath = add_suffix(
            src_img.absolutepath,
            "_{}{}".format(axis_names[dim].upper(), str(slab_idx).zfill(4)))
        images.append(im_out)
    return images
def concat_warp2d(fname_list, fname_warp3d, fname_dest):
    """
    Concatenate 2d warping fields into a 3d warping field along z dimension. The 3rd dimension of the resulting warping
    field will be zeroed.

    :param fname_list: list of 2d warping fields (along X and Y).
    :param fname_warp3d: output name of 3d warping field
    :param fname_dest: 3d destination file (used to copy header information)
    :return: none
    """
    nx, ny = nib.load(fname_list[0]).shape[0:2]
    nz = len(fname_list)
    # Last axis holds the displacement components; index 2 (z) stays zero.
    warp3d = np.zeros([nx, ny, nz, 1, 3])
    for iz, fname in enumerate(fname_list):
        # NOTE(review): get_data()/get_affine() are deprecated nibabel APIs
        # (get_fdata()/.affine); kept as-is here to preserve behavior.
        warp2d = nib.load(fname).get_data()
        warp3d[:, :, iz, 0, 0] = warp2d[:, :, 0, 0, 0]
        warp3d[:, :, iz, 0, 1] = warp2d[:, :, 0, 0, 1]
        del warp2d
    # save new image
    im_dest = nib.load(fname_dest)
    affine_dest = im_dest.get_affine()
    im_warp3d = nib.nifti1.Nifti1Image(warp3d, affine_dest)
    # set "intent" code to vector, to be interpreted as warping field
    im_warp3d.header.set_intent('vector', (), '')
    nib.save(im_warp3d, fname_warp3d)
def add_suffix(fname, suffix):
    """
    Add suffix between end of file name and extension.

    :param fname: absolute or relative file name. Example: t2.nii
    :param suffix: suffix. Example: _mean
    :return: file name with suffix. Example: t2_mean.nii

    Examples:

    .. code:: python

        add_suffix(t2.nii, _mean) -> t2_mean.nii
        add_suffix(t2.nii.gz, a) -> t2a.nii.gz
    """
    stem, ext = splitext(fname)
    # splitext() already returns the folder joined into the stem, so plain
    # concatenation suffices; os.path.join() with a single argument was a no-op.
    return stem + suffix + ext
def splitext(fname):
    """
    Split a fname (folder/file + ext) into a folder/file and extension.

    Note: for .nii.gz the extension is understandably .nii.gz, not .gz
    (``os.path.splitext()`` would want to do the latter, hence the special case).
    """
    folder, base = os.path.split(fname)
    stem = ext = None
    # Handle compound extensions that os.path.splitext() would split wrongly.
    for compound_ext in ('.nii.gz', '.tar.gz'):
        if base.endswith(compound_ext):
            stem, ext = base[:-len(compound_ext)], compound_ext
            break
    if ext is None:
        stem, ext = os.path.splitext(base)
    return os.path.join(folder, stem), ext
def check_dim(fname, dim_lst=None):
    """
    Check if the image dimensionality matches one of the accepted values.

    Example: to check if an image is 2D or 3D: check_dim(my_file, dim_lst=[2, 3])

    :param fname: path of the image to check.
    :param dim_lst: accepted dimensionalities (default: [3]).
    :raises ValueError: if the image dimensionality is not in dim_lst.
    """
    # Avoid the mutable-default-argument pitfall; default to 3D images.
    if dim_lst is None:
        dim_lst = [3]
    # hdr['dim'][0] is the number of dimensions (NIfTI 'dim' field convention).
    dim = Image(fname).hdr['dim'][:4]
    if dim[0] not in dim_lst:
        raise ValueError(f"File {fname} has {dim[0]} dimensions! Accepted dimensions are: {dim_lst}.")
def generate_output_file(fname_in, fname_out, squeeze_data=True, verbose=1):
    """
    Copy fname_in to fname_out with a few convenient checks: make sure input file exists, if fname_out exists send a
    warning, if input and output NIFTI format are different (nii vs. nii.gz) convert by unzipping or zipping, and
    display nice message at the end.

    :param fname_in: path of the existing source file.
    :param fname_out: path of the destination file.
    :param squeeze_data: forwarded to convert() when a format change is needed.
    :param verbose: unused here; kept for interface compatibility.
    :return: fname_out (recomposed from its parsed parts).
    """
    path_in, file_in, ext_in = extract_fname(fname_in)
    path_out, file_out, ext_out = extract_fname(fname_out)
    # Recomposed destination path, reused by every message below.
    final_path = os.path.join(path_out, file_out + ext_out)
    # create output path (ignore if it already exists)
    pathlib.Path(path_out).mkdir(parents=True, exist_ok=True)
    # if input image does not exist, give error
    if not os.path.isfile(fname_in):
        raise IOError(f"File {fname_in} is not a regular file!")
    # if input and output fnames are the same, do nothing and exit function
    if fname_in == fname_out:
        logger.info("File created: %s", final_path)
        return final_path
    # if fname_out already exists in nii or nii.gz format
    if os.path.isfile(final_path):
        logger.warning(f"File {final_path} already exists. Deleting it..")
        os.remove(final_path)
    if ext_in != ext_out:
        # Different NIFTI container (nii vs nii.gz): load and re-save to convert.
        img = Image(fname_in)
        img = convert(img, squeeze_data=squeeze_data)
        img.save(fname_out)
    else:
        # Generate output file without changing the extension
        shutil.move(fname_in, fname_out)
    logger.info("File created: %s", final_path)
    return final_path
def pad_image(im: Image, pad_x_i: int = 0, pad_x_f: int = 0, pad_y_i: int = 0, pad_y_f: int = 0, pad_z_i: int = 0, pad_z_f: int = 0):
    """
    Given an input image, create a copy with specified padding.

    :param im: input Image (its .data may be reshaped in place if 2D).
    :param pad_x_i: number of zero voxels prepended along x; pad_x_f: appended along x.
    :param pad_y_i: idem along y; pad_y_f: appended along y.
    :param pad_z_i: idem along z; pad_z_f: appended along z.
    :return: a new, zero-padded Image whose path gets a "_pad" suffix and whose
             qform/sform origin is shifted so physical coordinates are preserved.
    """
    nx, ny, nz, nt, px, py, pz, pt = im.dim
    pad_x_i, pad_x_f, pad_y_i, pad_y_f, pad_z_i, pad_z_f = int(pad_x_i), int(pad_x_f), int(pad_y_i), int(pad_y_f), int(pad_z_i), int(pad_z_f)
    # Promote 2D data to 3D so the slicing below is uniform.
    if len(im.data.shape) == 2:
        new_shape = list(im.data.shape)
        new_shape.append(1)
        im.data = im.data.reshape(new_shape)
    # initialize padded_data, with same type as im.data
    padded_data = np.zeros((nx + pad_x_i + pad_x_f, ny + pad_y_i + pad_y_f, nz + pad_z_i + pad_z_f), dtype=im.data.dtype)
    # Convert end-padding counts into slice end indices: 0 becomes None
    # (slice runs to the end), a positive count becomes a negative index.
    if pad_x_f == 0:
        pad_x_f = None
    elif pad_x_f > 0:
        pad_x_f *= -1
    if pad_y_f == 0:
        pad_y_f = None
    elif pad_y_f > 0:
        pad_y_f *= -1
    if pad_z_f == 0:
        pad_z_f = None
    elif pad_z_f > 0:
        pad_z_f *= -1
    # Place the original data inside the zero canvas.
    padded_data[pad_x_i:pad_x_f, pad_y_i:pad_y_f, pad_z_i:pad_z_f] = im.data
    im_out = im.copy()
    # TODO: Do not copy the Image(), because the dim field and hdr.get_data_shape() will not be updated properly.
    # better to just create a new Image() from scratch.
    im_out.data = padded_data # done after the call of the function
    im_out.absolutepath = add_suffix(im_out.absolutepath, "_pad")
    # adapt the origin in the sform and qform matrix
    new_origin = np.dot(im_out.hdr.get_qform(), [-pad_x_i, -pad_y_i, -pad_z_i, 1])
    im_out.hdr.structarr['qoffset_x'] = new_origin[0]
    im_out.hdr.structarr['qoffset_y'] = new_origin[1]
    im_out.hdr.structarr['qoffset_z'] = new_origin[2]
    im_out.hdr.structarr['srow_x'][-1] = new_origin[0]
    im_out.hdr.structarr['srow_y'][-1] = new_origin[1]
    im_out.hdr.structarr['srow_z'][-1] = new_origin[2]
    return im_out
# Supported pretty-printing styles for NIfTI headers.
HEADER_FORMATS = ('sct', 'fslhd', 'nibabel')


def create_formatted_header_string(header, output_format='sct'):
    """
    Generate a string with formatted header fields for pretty-printing.

    :param header: Input header to apply formatting to.
    :param output_format: One of HEADER_FORMATS: 'sct' (fslhd fields with SCT
                          tweaks), 'fslhd' (raw fslhd fields), or 'nibabel'
                          (raw nibabel header fields).
    :return: aligned, human-readable string.
    :raises ValueError: for an unknown output_format.
    """
    if output_format == 'nibabel':
        # Unwrap the 0-d numpy values stored in the nibabel header dict.
        fields = {key: value[()] for key, value in dict(header).items()}
        return _align_dict(fields, use_tabs=False, delimiter=": ")
    if output_format == 'fslhd':
        return _align_dict(fslhd.generate_nifti_fields(header))
    if output_format == 'sct':
        return _align_dict(_apply_sct_header_formatting(fslhd.generate_nifti_fields(header)))
    raise ValueError(f"Can't format header using '{output_format}' format. Available formats: {HEADER_FORMATS}")
def _apply_sct_header_formatting(fslhd_fields):
"""
Tweak fslhd's header fields using SCT's visual preferences.
:param fslhd_fields: Dict with fslhd's header fields.
:return modified_fields: Dict with modified header fields.
"""
modified_fields = {}
dim, pixdim = [], []
for key, value in fslhd_fields.items():
# Replace split dim fields with one-line dim field
if key.startswith('dim'):
dim.append(value)
if key == 'dim7':
modified_fields['dim'] = dim
# Replace split pixdim fields with one-line pixdim field
elif key.startswith('pixdim'):
pixdim.append(float(value))
if key == 'pixdim7':
modified_fields['pixdim'] = pixdim
# Leave all other fields
else:
modified_fields[key] = value
return modified_fields
def _align_dict(dictionary, use_tabs=True, delimiter=""):
"""
Create a string with aligned padding from a dict's keys and values.
:param dictionary: Variable of type dict.
:param use_tabs: Whether to use tabs instead of spaces for padding.
:return: String containing padded dict key/values.
"""
len_max = max([len(str(name)) for name in dictionary.keys()]) + 2
out = []
for k, v in dictionary.items():
if use_tabs:
len_max = int(8 * round(float(len_max)/8)) # Round up to the nearest 8 to align with tab stops
padding = "\t" * math.ceil((len_max - len(k))/8)
else:
padding = " " * (len_max - len(k))
out.append(f"{k}{padding}{delimiter}{v}")
return '\n'.join(out)
|
sahildua2305/nlp1-norvig | refs/heads/master | norvig-script.py | 1 | import re, collections
def words(text):
    """Return every lowercase alphabetic run in *text*, lowercasing first."""
    return re.findall('[a-z]+', text.lower())
def train(features):
    """Build an add-one-smoothed frequency table: unseen words count as 1."""
    model = collections.defaultdict(lambda: 1)
    for feature in features:
        model[feature] += 1
    return model
# Word-frequency model trained on the big.txt corpus. open() replaces the
# Python-2-only built-in file() (removed in Python 3), and the context
# manager closes the corpus handle deterministically.
with open('big.txt') as corpus:
    NWORDS = train(words(corpus.read()))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
    """Return all strings one edit (delete/transpose/replace/insert) from *word*."""
    pairs = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    result = set()
    for left, right in pairs:
        if right:
            result.add(left + right[1:])                         # deletion
        if len(right) > 1:
            result.add(left + right[1] + right[0] + right[2:])   # transposition
        for ch in alphabet:
            if right:
                result.add(left + ch + right[1:])                # replacement
            result.add(left + ch + right)                        # insertion
    return result
def known_edits2(word):
    """Return the known words exactly two edits away from *word*."""
    result = set()
    for e1 in edits1(word):
        result.update(e2 for e2 in edits1(e1) if e2 in NWORDS)
    return result
def known(words):
    """Return the subset of *words* present in the language model."""
    return {w for w in words if w in NWORDS}
def correct(word):
    """Return the most probable spelling correction for *word*."""
    # Prefer the word itself, then 1-edit candidates, then 2-edit candidates;
    # fall back to the input unchanged if nothing is known.
    candidates = (known([word])
                  or known(edits1(word))
                  or known_edits2(word)
                  or [word])
    return max(candidates, key=NWORDS.get)
# Read one word from stdin and print its best correction. The function-call
# form of print is valid in both Python 2 and Python 3 (raw_input remains
# Python-2-only, matching the rest of this script).
print(correct(raw_input()))
|
kustodian/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/aws_batch_job_definition.py | 5 | #!/usr/bin/python
# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_job_definition
short_description: Manage AWS Batch Job Definitions
description:
- This module allows the management of AWS Batch Job Definitions.
It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
job_definition_arn:
description:
- The ARN for the job definition.
type: str
job_definition_name:
description:
- The name for the job definition.
required: true
type: str
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
type: str
type:
description:
- The type of job definition.
required: true
type: str
parameters:
description:
- Default parameter substitution placeholders to set in the job definition. Parameters are specified as a
key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from
the job definition.
type: dict
image:
description:
- The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
Hub registry are available by default. Other repositories are specified with `` repository-url /image <colon>tag ``.
Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes,
and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker
Remote API and the IMAGE parameter of docker run.
required: true
type: str
vcpus:
description:
- The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container
section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to
1,024 CPU shares.
required: true
type: int
memory:
description:
- The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory
specified here, the container is killed. This parameter maps to Memory in the Create a container section of the
Docker Remote API and the --memory option to docker run.
required: true
type: int
command:
description:
- The command that is passed to the container. This parameter maps to Cmd in the Create a container section of
the Docker Remote API and the COMMAND parameter to docker run. For more information,
see U(https://docs.docker.com/engine/reference/builder/#cmd).
type: list
elements: str
job_role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
type: str
volumes:
description:
- A list of data volumes used in a job.
suboptions:
host:
description:
- The contents of the host parameter determine whether your data volume persists on the host container
instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host
path for your data volume, but the data is not guaranteed to persist after the containers associated with
it stop running.
This is a dictionary with one property, sourcePath - The path on the host container
instance that is presented to the container. If this parameter is empty,then the Docker daemon has assigned
a host path for you. If the host parameter contains a sourcePath file location, then the data volume
persists at the specified location on the host container instance until you delete it manually. If the
sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the
location does exist, the contents of the source path folder are exported.
name:
description:
- The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are
allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
type: list
elements: dict
environment:
description:
- The environment variables to pass to a container. This parameter maps to Env in the Create a container section
of the Docker Remote API and the --env option to docker run.
suboptions:
name:
description:
- The name of the key value pair. For environment variables, this is the name of the environment variable.
value:
description:
- The value of the key value pair. For environment variables, this is the value of the environment variable.
type: list
elements: dict
mount_points:
description:
- The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container
section of the Docker Remote API and the --volume option to docker run.
suboptions:
containerPath:
description:
- The path on the container at which to mount the host volume.
readOnly:
description:
- If this value is true , the container has read-only access to the volume; otherwise, the container can write
to the volume. The default value is C(false).
sourceVolume:
description:
- The name of the volume to mount.
type: list
elements: dict
readonly_root_filesystem:
description:
- When this parameter is true, the container is given read-only access to its root file system. This parameter
maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option
to docker run.
type: str
privileged:
description:
- When this parameter is true, the container is given elevated privileges on the host container instance
(similar to the root user). This parameter maps to Privileged in the Create a container section of the
Docker Remote API and the --privileged option to docker run.
type: str
ulimits:
description:
- A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section
of the Docker Remote API and the --ulimit option to docker run.
suboptions:
hardLimit:
description:
- The hard limit for the ulimit type.
name:
description:
- The type of the ulimit.
softLimit:
description:
- The soft limit for the ulimit type.
type: list
elements: dict
user:
description:
- The user name to use inside the container. This parameter maps to User in the Create a container section of
the Docker Remote API and the --user option to docker run.
type: str
attempts:
description:
- Retry strategy - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10
attempts. If attempts is greater than one, the job is retried if it fails until it has moved to RUNNABLE that
many times.
type: int
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Job Definition
batch_job_definition:
job_definition_name: My Batch Job Definition
state: present
type: container
parameters:
Param1: Val1
Param2: Val2
image: <Docker Image URL>
vcpus: 1
memory: 512
command:
- python
- run_my_script.py
- arg1
job_role_arn: <Job Role ARN>
attempts: 3
register: job_definition_create_result
- name: show results
debug: var=job_definition_create_result
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
batch_job_definition_action: none
changed: false
response:
job_definition_arn: "arn:aws:batch:...."
job_definition_name: <name>
status: INACTIVE
type: container
type: dict
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.batch import AWSConnection, cc, set_api_params
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, HAS_BOTO3
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
except ImportError:
pass # Handled by HAS_BOTO3
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
# logger = logging.getLogger()
# logging.basicConfig(filename='ansible_debug.log')
# logger.setLevel(logging.DEBUG)
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Currently a no-op placeholder: validation is delegated to the
    AnsibleModule argument_spec and to the AWS service itself.

    :param module: AnsibleModule instance holding the task parameters.
    :param aws: AWSConnection wrapper (unused).
    :return: None
    """
    return
# ---------------------------------------------------------------------------------------------------
#
# Batch Job Definition functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_job_definition(module, connection):
    """
    Fetch the latest revision of the named job definition.

    :param module: AnsibleModule providing params['job_definition_name'].
    :param connection: AWSConnection exposing a boto3 'batch' client.
    :return: the newest job definition dict, or None if absent or the
             describe call fails with a ClientError.
    """
    try:
        described = connection.client().describe_job_definitions(
            jobDefinitionName=module.params['job_definition_name']
        )
    except ClientError:
        return None
    revisions = described['jobDefinitions']
    if not revisions:
        return None
    # max() yields the first entry carrying the highest revision number,
    # matching the original max-then-next lookup.
    return max(revisions, key=lambda d: d['revision'])
def create_job_definition(module, aws):
    """
    Register a Batch job definition (or a new revision of it) built from
    the module parameters.

    :param module: AnsibleModule (provides params and check_mode).
    :param aws: AWSConnection used to obtain the 'batch' boto3 client.
    :return: True when a definition was (or, in check mode, would be) registered.
    """
    batch_client = aws.client('batch')
    changed = False
    # Assemble the register_job_definition payload from the module params.
    request = set_api_params(module, get_base_params())
    request['retryStrategy'] = set_api_params(module, get_retry_strategy_params())
    request['containerProperties'] = set_api_params(module, get_container_property_params())
    try:
        if not module.check_mode:
            batch_client.register_job_definition(**request)
        changed = True
    except (ClientError, ParamValidationError, MissingParametersError) as e:
        module.fail_json(msg='Error registering job definition: {0}'.format(to_native(e)),
                         exception=traceback.format_exc())
    return changed
def get_retry_strategy_params():
    """Module param names that map into the retryStrategy payload."""
    return ('attempts',)
def get_container_property_params():
    """Module param names that map into the containerProperties payload."""
    return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes',
            'environment', 'mount_points', 'readonly_root_filesystem',
            'privileged', 'ulimits', 'user')
def get_base_params():
    """Top-level module param names for register_job_definition."""
    return ('job_definition_name', 'type', 'parameters')
def get_compute_environment_order_list(module):
    """
    Build the boto3-style computeEnvironmentOrder list from module params.

    NOTE(review): 'compute_environment_order' is not declared in this
    module's argument_spec — this helper looks like a leftover from the
    job-queue module; confirm before relying on it.
    """
    return [dict(order=entry['order'], computeEnvironment=entry['compute_environment'])
            for entry in module.params['compute_environment_order']]
def remove_job_definition(module, aws):
    """
    Deregister the Batch job definition revision named by job_definition_arn.

    :param module: AnsibleModule (provides params and check_mode).
    :param aws: AWSConnection used to obtain the 'batch' boto3 client.
    :return: True when the definition was (or, in check mode, would be) removed.
    """
    batch_client = aws.client('batch')
    changed = False
    try:
        if not module.check_mode:
            batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn'])
        changed = True
    except (ClientError, ParamValidationError, MissingParametersError) as e:
        module.fail_json(msg='Error removing job definition: {0}'.format(to_native(e)),
                         exception=traceback.format_exc())
    return changed
def job_definition_equal(module, current_definition):
    """
    Compare the requested definition against the existing one.

    :param module: AnsibleModule holding the requested parameters.
    :param current_definition: definition dict returned by AWS.
    :return: True when every tracked base/container/retry parameter matches.
    """
    params = module.params
    base_ok = all(params.get(p) == current_definition.get(cc(p))
                  for p in get_base_params())
    container = current_definition.get('containerProperties')
    container_ok = all(params.get(p) == container.get(cc(p))
                       for p in get_container_property_params())
    retry = current_definition.get('retryStrategy')
    retry_ok = all(params.get(p) == retry.get(cc(p))
                   for p in get_retry_strategy_params())
    return base_ok and container_ok and retry_ok
def manage_state(module, aws):
    """
    Drive the job definition toward the requested state ('present'/'absent').

    present + exists: register a new revision only if tracked params differ.
    present + missing: register the definition.
    absent + exists: deregister the revision named by job_definition_arn.

    :param module: AnsibleModule with task parameters and check_mode.
    :param aws: AWSConnection wrapper.
    :return: dict with 'changed', 'batch_job_definition_action' and 'response'.
    """
    changed = False
    current_state = 'absent'
    state = module.params['state']
    # NOTE(review): job_definition_name and check_mode are unused below.
    job_definition_name = module.params['job_definition_name']
    action_taken = 'none'
    response = None
    check_mode = module.check_mode
    # check if the job definition exists
    current_job_definition = get_current_job_definition(module, aws)
    if current_job_definition:
        current_state = 'present'
    if state == 'present':
        if current_state == 'present':
            # check if definition has changed and register a new version if necessary
            if not job_definition_equal(module, current_job_definition):
                create_job_definition(module, aws)
                action_taken = 'updated with new version'
                changed = True
        else:
            # Create Job definition
            changed = create_job_definition(module, aws)
            action_taken = 'added'
        # Re-describe so the caller receives the latest revision.
        response = get_current_job_definition(module, aws)
        if not response:
            module.fail_json(msg='Unable to get job definition information after creating/updating')
    else:
        if current_state == 'present':
            # remove the Job definition
            changed = remove_job_definition(module, aws)
            action_taken = 'deregistered'
    return dict(changed=changed, batch_job_definition_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
    """
    Main entry point.

    Builds the argument spec, validates dependencies, reconciles the job
    definition state and exits with snake_cased results.

    :return dict: ansible facts
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            job_definition_name=dict(required=True),
            job_definition_arn=dict(),
            type=dict(required=True),
            parameters=dict(type='dict'),
            image=dict(required=True),
            vcpus=dict(type='int', required=True),
            memory=dict(type='int', required=True),
            command=dict(type='list', default=[]),
            job_role_arn=dict(),
            volumes=dict(type='list', default=[]),
            environment=dict(type='list', default=[]),
            mount_points=dict(type='list', default=[]),
            readonly_root_filesystem=dict(),
            privileged=dict(),
            ulimits=dict(type='list', default=[]),
            user=dict(),
            attempts=dict(type='int'),
            region=dict(aliases=['aws_region', 'ec2_region'])
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')
    # Pre-build a boto3 'batch' client wrapper from the module's AWS params.
    aws = AWSConnection(module, ['batch'])
    validate_params(module, aws)
    results = manage_state(module, aws)
    # Convert camelCase AWS keys to snake_case for Ansible consumers.
    module.exit_json(**camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()
|
was4444/chromium.src | refs/heads/nw15 | tools/android/loading/page_track_unittest.py | 15 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import devtools_monitor
from page_track import PageTrack
class MockDevToolsConnection(object):
  """Minimal DevTools connection stub recording whether monitoring stopped."""

  def __init__(self):
    # Flipped to True by StopMonitoring(); inspected by the tests.
    self.stop_has_been_called = False

  def RegisterListener(self, name, listener):
    # Listener registration is irrelevant for these tests; accept and ignore.
    pass

  def StopMonitoring(self):
    self.stop_has_been_called = True
class PageTrackTest(unittest.TestCase):
  """Unit tests for PageTrack's handling of DevTools Page.* events."""

  # A minimal well-formed sequence: main frame 1234.1 starts loading,
  # attaches child frame 1234.12 which loads and finishes, then the main
  # frame finishes loading.
  _EVENTS = [{'method': 'Page.frameStartedLoading',
              'params': {'frameId': '1234.1'}},
             {'method': 'Page.frameAttached',
              'params': {'frameId': '1234.12', 'parentFrameId': '1234.1'}},
             {'method': 'Page.frameStartedLoading',
              'params': {'frameId': '1234.12'}},
             {'method': 'Page.frameStoppedLoading',
              'params': {'frameId': '1234.12'}},
             {'method': 'Page.frameStoppedLoading',
              'params': {'frameId': '1234.1'}}]

  def testAsksMonitoringToStop(self):
    """Monitoring stops only once the main frame has finished loading."""
    devtools_connection = MockDevToolsConnection()
    page_track = PageTrack(devtools_connection)
    for msg in PageTrackTest._EVENTS[:-1]:
      page_track.Handle(msg['method'], msg)
    self.assertFalse(devtools_connection.stop_has_been_called)
    msg = PageTrackTest._EVENTS[-1]
    page_track.Handle(msg['method'], msg)
    self.assertTrue(devtools_connection.stop_has_been_called)

  def testUnknownParent(self):
    """Attaching to a frame that was never seen is an error."""
    page_track = PageTrack(None)
    msg = {'method': 'Page.frameAttached',
           'params': {'frameId': '1234.12', 'parentFrameId': '1234.1'}}
    with self.assertRaises(AssertionError):
      page_track.Handle(msg['method'], msg)

  def testStopsLoadingUnknownFrame(self):
    """A stop event for a never-started frame is an error."""
    page_track = PageTrack(None)
    msg = {'method': 'Page.frameStoppedLoading',
           'params': {'frameId': '1234.12'}}
    with self.assertRaises(AssertionError):
      page_track.Handle(msg['method'], msg)

  def testGetMainFrameId(self):
    """The first frame to start loading is reported as the main frame."""
    devtools_connection = MockDevToolsConnection()
    page_track = PageTrack(devtools_connection)
    for msg in PageTrackTest._EVENTS:
      page_track.Handle(msg['method'], msg)
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual('1234.1', page_track.GetMainFrameId())
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
kotky/smart-apps | refs/heads/master | smartapps/urls.py | 1 | from django.conf.urls import include, url
from django.contrib import admin
# Discover admin.py modules in installed apps so their models are registered.
admin.autodiscover()
import multimeter.views
# Examples:
# url(r'^$', 'smartapps.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# URL routing table: site index, a db view, the Django admin, and the
# multimeter app's own urlconf.
urlpatterns = [
    url(r'^$', multimeter.views.index, name='index'),
    url(r'^db', multimeter.views.db, name='db'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^multimeter/', include('multimeter.urls')),
]
|
Ffreasy/crazyflie-clients-python | refs/heads/develop | lib/cfzmq.py | 7 | # -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2015 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Server used to connect to a Crazyflie using ZMQ.
"""
import sys
import os
import logging
import signal
import zmq
import Queue
from threading import Thread
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
# On POSIX, silence C libraries that write directly to fd 1 by pointing it at
# /dev/null, while keeping Python's own sys.stdout usable through a dup of the
# original descriptor.
if os.name == 'posix':
    print 'Disabling standard output for libraries!'
    stdout = os.dup(1)
    os.dup2(os.open('/dev/null', os.O_WRONLY), 1)
    sys.stdout = os.fdopen(stdout, 'w')
# set SDL to use the dummy NULL video driver,
# so it doesn't need a windowing system.
os.environ["SDL_VIDEODRIVER"] = "dummy"
# ZMQ port map exposed by this server:
# Main command socket for control (ping/pong)
ZMQ_SRV_PORT = 2000
# Log data socket (publish)
ZMQ_LOG_PORT = 2001
# Param value updated (publish)
ZMQ_PARAM_PORT = 2002
# Async event for connection, like connection lost (publish)
ZMQ_CONN_PORT = 2003
# Control set-points for Crazyflie (pull)
ZMQ_CTRL_PORT = 2004
# Timeout before giving up when verifying param write
PARAM_TIMEOUT = 2
# Timeout before giving up connection
CONNECT_TIMEOUT = 5
# Timeout before giving up adding/starting log config
LOG_TIMEOUT = 10
# Module-level logger for this server.
logger = logging.getLogger(__name__)
class _SrvThread(Thread):
    """Server thread for the command (REP) socket.

    Handles synchronous commands (scan/connect/disconnect/log/param) and
    forwards asynchronous Crazyflie events to the log/param/conn PUB sockets.
    Single-slot queues bridge the asynchronous callbacks into synchronous
    replies on the command socket.
    """
    def __init__(self, socket, log_socket, param_socket, conn_socket, cf, *args):
        super(_SrvThread, self).__init__(*args)
        self._socket = socket
        self._log_socket = log_socket
        self._param_socket = param_socket
        self._conn_socket = conn_socket
        self._cf = cf
        # Forward Crazyflie lifecycle events to the connection PUB socket.
        self._cf.connected.add_callback(self._connected)
        self._cf.connection_failed.add_callback(self._connection_failed)
        self._cf.connection_lost.add_callback(self._connection_lost)
        self._cf.disconnected.add_callback(self._disconnected)
        self._cf.connection_requested.add_callback(self._connection_requested)
        self._cf.param.all_updated.add_callback(self._tocs_updated)
        self._cf.param.all_update_callback.add_callback(self._all_param_update)
        # Size-1 queues: each command waits for exactly one async answer.
        self._conn_queue = Queue.Queue(1)
        self._param_queue = Queue.Queue(1)
        self._log_started_queue = Queue.Queue(1)
        self._log_added_queue = Queue.Queue(1)
        self._logging_configs = {}
    def _connection_requested(self, uri):
        conn_ev = {"version": 1, "event": "requested", "uri": uri}
        self._conn_socket.send_json(conn_ev)
    def _connected(self, uri):
        conn_ev = {"version": 1, "event": "connected", "uri": uri}
        self._conn_socket.send_json(conn_ev)
    def _connection_failed(self, uri, msg):
        # Unblocks the pending "connect" command with an error status and
        # publishes the failure event.
        logger.info("Connection failed to {}: {}".format(uri, msg))
        resp = {"version": 1, "status": 1, "msg": msg}
        self._conn_queue.put_nowait(resp)
        conn_ev = {"version": 1, "event": "failed", "uri": uri, "msg": msg}
        self._conn_socket.send_json(conn_ev)
    def _connection_lost(self, uri, msg):
        conn_ev = {"version": 1, "event": "lost", "uri": uri, "msg": msg}
        self._conn_socket.send_json(conn_ev)
    def _disconnected(self, uri):
        conn_ev = {"version": 1, "event": "disconnected", "uri": uri}
        self._conn_socket.send_json(conn_ev)
    def _tocs_updated(self):
        """Build JSON-friendly log/param TOCs and complete the connect command."""
        # First do the log
        log_toc = self._cf.log.toc.toc
        log = {}
        for group in log_toc:
            log[group] = {}
            for name in log_toc[group]:
                log[group][name] = {"type": log_toc[group][name].ctype}
        # Then the params
        param_toc = self._cf.param.toc.toc
        param = {}
        for group in param_toc:
            param[group] = {}
            for name in param_toc[group]:
                param[group][name] = {
                    "type": param_toc[group][name].ctype,
                    "access": "RW" if param_toc[group][name].access == 0 else "RO",
                    "value": self._cf.param.values[group][name]}
        resp = {"version": 1, "status": 0, "log": log, "param": param}
        self._conn_queue.put_nowait(resp)
    def _handle_scanning(self):
        resp = {"version": 1}
        interfaces = cflib.crtp.scan_interfaces()
        resp["interfaces"] = []
        for i in interfaces:
            resp["interfaces"].append({"uri": i[0], "info": i[1]})
        return resp
    def _handle_connect(self, uri):
        # Blocks until _tocs_updated (success) or _connection_failed fires.
        self._cf.open_link(uri)
        return self._conn_queue.get(block=True)
    def _logging_started(self, conf, started):
        out = {"version": 1, "name": conf.name}
        if started:
            out["event"] = "started"
        else:
            out["event"] = "stopped"
        self._log_socket.send_json(out)
        self._log_started_queue.put_nowait(started)
    def _logging_added(self, conf, added):
        out = {"version": 1, "name": conf.name}
        if added:
            out["event"] = "created"
        else:
            out["event"] = "deleted"
        self._log_socket.send_json(out)
        self._log_added_queue.put_nowait(added)
    def _handle_logging(self, data):
        """Dispatch a log command: create/start/stop/delete a log config."""
        resp = {"version": 1}
        if data["action"] == "create":
            lg = LogConfig(data["name"], data["period"])
            for v in data["variables"]:
                lg.add_variable(v)
            lg.started_cb.add_callback(self._logging_started)
            lg.added_cb.add_callback(self._logging_added)
            try:
                lg.data_received_cb.add_callback(self._logdata_callback)
                self._logging_configs[data["name"]] = lg
                self._cf.log.add_config(lg)
                lg.create()
                self._log_added_queue.get(block=True, timeout=LOG_TIMEOUT)
                resp["status"] = 0
            except KeyError as e:
                resp["status"] = 1
                resp["msg"] = str(e)
            except AttributeError as e:
                resp["status"] = 2
                resp["msg"] = str(e)
            except Queue.Empty:
                resp["status"] = 3
                resp["msg"] = "Log configuration did not start"
        if data["action"] == "start":
            try:
                self._logging_configs[data["name"]].start()
                self._log_started_queue.get(block=True, timeout=LOG_TIMEOUT)
                resp["status"] = 0
            except KeyError as e:
                resp["status"] = 1
                resp["msg"] = "{} config not found".format(str(e))
            except Queue.Empty:
                resp["status"] = 2
                resp["msg"] = "Log configuration did not stop"
        if data["action"] == "stop":
            try:
                self._logging_configs[data["name"]].stop()
                self._log_started_queue.get(block=True, timeout=LOG_TIMEOUT)
                resp["status"] = 0
            except KeyError as e:
                resp["status"] = 1
                resp["msg"] = "{} config not found".format(str(e))
            except Queue.Empty:
                resp["status"] = 2
                resp["msg"] = "Log configuration did not stop"
        if data["action"] == "delete":
            try:
                self._logging_configs[data["name"]].delete()
                self._log_added_queue.get(block=True, timeout=LOG_TIMEOUT)
                resp["status"] = 0
            except KeyError as e:
                resp["status"] = 1
                resp["msg"] = "{} config not found".format(str(e))
            except Queue.Empty:
                resp["status"] = 2
                resp["msg"] = "Log configuration did not stop"
        return resp
    def _handle_param(self, data):
        """Set a parameter value and wait for the confirmation callback."""
        resp = {"version": 1}
        group = data["name"].split(".")[0]
        name = data["name"].split(".")[1]
        self._cf.param.add_update_callback(group=group, name=name,
                                           cb=self._param_callback)
        try:
            self._cf.param.set_value(data["name"], str(data["value"]))
            answer = self._param_queue.get(block=True, timeout=PARAM_TIMEOUT)
            resp["name"] = answer["name"]
            resp["value"] = answer["value"]
            resp["status"] = 0
        except KeyError as e:
            resp["status"] = 1
            resp["msg"] = str(e)
        except AttributeError as e:
            resp["status"] = 2
            resp["msg"] = str(e)
        except Queue.Empty:
            resp["status"] = 3
            # Bug fix: the two literals used to concatenate without a space
            # ("...parameter<name>").
            resp["msg"] = "Timeout when setting parameter " \
                          "{}".format(data["name"])
        return resp
    def _all_param_update(self, name, value):
        resp = {"version": 1, "name": name, "value": value}
        self._param_socket.send_json(resp)
    def _param_callback(self, name, value):
        # One-shot: remove the callback and hand the value to _handle_param.
        group = name.split(".")[0]
        name_short = name.split(".")[1]
        self._cf.param.remove_update_callback(group=group, name=name_short)
        self._param_queue.put_nowait({"name": name, "value": value})
    def _logdata_callback(self, ts, data, conf):
        out = {"version": 1, "name": conf.name, "event": "data",
               "timestamp": ts, "variables": {}}
        for d in data:
            out["variables"][d] = data[d]
        self._log_socket.send_json(out)
    def run(self):
        """Command loop: one JSON request in, one JSON response out."""
        logger.info("Starting server thread")
        while True:
            # Wait for the command
            cmd = self._socket.recv_json()
            response = {"version": 1}
            logger.info("Got command {}".format(cmd))
            if cmd["cmd"] == "scan":
                response = self._handle_scanning()
            elif cmd["cmd"] == "connect":
                response = self._handle_connect(cmd["uri"])
            elif cmd["cmd"] == "disconnect":
                self._cf.close_link()
                response["status"] = 0
            elif cmd["cmd"] == "log":
                response = self._handle_logging(cmd)
            elif cmd["cmd"] == "param":
                response = self._handle_param(cmd)
            else:
                response["status"] = 0xFF
                response["msg"] = "Unknown command {}".format(cmd["cmd"])
            self._socket.send_json(response)
class _CtrlThread(Thread):
def __init__(self, socket, cf, *args):
super(_CtrlThread, self).__init__(*args)
self._socket = socket
self._cf = cf
def run(self):
while True:
cmd = self._socket.recv_json()
self._cf.commander.send_setpoint(cmd["roll"], cmd["pitch"],
cmd["yaw"], cmd["thrust"])
class ZMQServer():
    """Crazyflie ZMQ server: binds all five ZMQ sockets and starts the
    command-server and control threads."""
    def __init__(self, base_url):
        """Start threads and bind ports.

        :param base_url: base transport URL (e.g. tcp://127.0.0.1); the
            well-known port numbers are appended per socket.
        """
        cflib.crtp.init_drivers(enable_debug_driver=True)
        self._cf = Crazyflie(ro_cache=sys.path[0]+"/cflib/cache",
                             rw_cache=sys.path[1]+"/cache")
        # Let CTRL-C terminate the process with default behavior.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        self._base_url = base_url
        self._context = zmq.Context()
        cmd_srv = self._bind_zmq_socket(zmq.REP, "cmd", ZMQ_SRV_PORT)
        log_srv = self._bind_zmq_socket(zmq.PUB, "log", ZMQ_LOG_PORT)
        param_srv = self._bind_zmq_socket(zmq.PUB, "param", ZMQ_PARAM_PORT)
        ctrl_srv = self._bind_zmq_socket(zmq.PULL, "ctrl", ZMQ_CTRL_PORT)
        conn_srv = self._bind_zmq_socket(zmq.PUB, "conn", ZMQ_CONN_PORT)
        self._scan_thread = _SrvThread(cmd_srv, log_srv, param_srv, conn_srv,
                                       self._cf)
        self._scan_thread.start()
        self._ctrl_thread = _CtrlThread(ctrl_srv, self._cf)
        self._ctrl_thread.start()
    def _bind_zmq_socket(self, pattern, name, port):
        """Create a socket of the given pattern and bind it at base_url:port."""
        srv = self._context.socket(pattern)
        srv_addr = "{}:{}".format(self._base_url, port)
        srv.bind(srv_addr)
        # Bug fix: message used to read "Biding ... serverat ..." (typo and
        # missing space between the concatenated literals).
        logger.info("Binding ZMQ {} server "
                    "at {}".format(name, srv_addr))
        return srv
def main():
    """Parse command-line options and start the Crazyflie ZMQ server."""
    import argparse
    parser = argparse.ArgumentParser(prog="cfzmq")
    parser.add_argument("-u", "--url", action="store", dest="url", type=str,
                        default="tcp://127.0.0.1",
                        help="URL where ZMQ will accept connections")
    parser.add_argument("-d", "--debug", action="store_true", dest="debug",
                        help="Enable debug output")
    args, _unused = parser.parse_known_args()
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=level)
    ZMQServer(args.url)
# CTRL-C to exit
|
t794104/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint_info.py | 12 | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: ec2_vpc_endpoint_info
short_description: Retrieves AWS VPC endpoints details using AWS methods.
description:
- Gets various details related to AWS VPC Endpoints
- This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change.
version_added: "2.4"
requirements: [ boto3 ]
options:
query:
description:
- Specifies the query action to take. Services returns the supported
AWS services that can be specified when creating an endpoint.
required: True
choices:
- services
- endpoints
vpc_endpoint_ids:
description:
- Get details of specific endpoint IDs
- Provide this value as a list
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
for possible filters.
author: Karen Cheng (@Etherdaemon)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple example of listing all support AWS services for VPC endpoints
- name: List supported AWS endpoint services
ec2_vpc_endpoint_info:
query: services
region: ap-southeast-2
register: supported_endpoint_services
- name: Get all endpoints in ap-southeast-2 region
ec2_vpc_endpoint_info:
query: endpoints
region: ap-southeast-2
register: existing_endpoints
- name: Get all endpoints with specific filters
ec2_vpc_endpoint_info:
query: endpoints
region: ap-southeast-2
filters:
vpc-id:
- vpc-12345678
- vpc-87654321
vpc-endpoint-state:
- available
- pending
register: existing_endpoints
- name: Get details on specific endpoint
ec2_vpc_endpoint_info:
query: endpoints
region: ap-southeast-2
vpc_endpoint_ids:
- vpce-12345678
register: endpoint_details
'''
RETURN = '''
service_names:
description: AWS VPC endpoint service names
returned: I(query) is C(services)
type: list
sample:
service_names:
- com.amazonaws.ap-southeast-2.s3
vpc_endpoints:
description:
- A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
returned: I(query) is C(endpoints)
type: list
sample:
vpc_endpoints:
- creation_timestamp: "2017-02-16T11:06:48+00:00"
policy_document: >
"{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
\"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
route_table_ids:
- rtb-abcd1234
service_name: "com.amazonaws.ap-southeast-2.s3"
state: "available"
vpc_endpoint_id: "vpce-abbad0d0"
vpc_id: "vpc-1111ffff"
'''
import json
try:
import botocore
except ImportError:
pass # will be picked up from imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, boto3_conn, get_aws_connection_info,
ansible_dict_to_boto3_filter_list, HAS_BOTO3, camel_dict_to_snake_dict, AWSRetry)
def date_handler(obj):
    """json.dumps fallback: render date/datetime objects via isoformat(),
    return anything else unchanged."""
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
@AWSRetry.exponential_backoff()
def get_supported_services(client, module):
    """Return all VPC-endpoint service names, following NextToken pagination."""
    service_names = list()
    params = dict()
    while True:
        page = client.describe_vpc_endpoint_services(**params)
        service_names.extend(page['ServiceNames'])
        if 'NextToken' not in page:
            break
        params['NextToken'] = page['NextToken']
    return dict(service_names=service_names)
@AWSRetry.exponential_backoff()
def get_endpoints(client, module):
    """Return details of all matching VPC endpoints, following pagination.

    The raw results are round-tripped through JSON (with date_handler) so
    datetime values become ISO strings, then each entry is converted to
    snake_case for Ansible consumption.
    """
    results = list()
    params = dict()
    params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
    if module.params.get('vpc_endpoint_ids'):
        params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids')
    while True:
        response = client.describe_vpc_endpoints(**params)
        results.extend(response['VpcEndpoints'])
        if 'NextToken' in response:
            params['NextToken'] = response['NextToken']
        else:
            break
    try:
        results = json.loads(json.dumps(results, default=date_handler))
    except Exception as e:
        # Bug fix: was str(e.message) — BaseException.message does not exist
        # on Python 3 and would raise AttributeError inside the error path.
        module.fail_json(msg=str(e))
    return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results])
def main():
    """Ansible entry point: build the argument spec, validate requirements,
    connect to EC2 and dispatch the requested query."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        query=dict(choices=['services', 'endpoints'], required=True),
        filters=dict(default={}, type='dict'),
        vpc_endpoint_ids=dict(type='list'),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if module._name == 'ec2_vpc_endpoint_facts':
        module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", version='2.13')
    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required.')
    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="region must be specified")
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=str(e))
    # Dispatch table: query name -> handler.
    invocations = {
        'services': get_supported_services,
        'endpoints': get_endpoints,
    }
    results = invocations[module.params.get('query')](connection, module)
    module.exit_json(**results)
if __name__ == '__main__':
main()
|
IKholopov/HackUPC2017 | refs/heads/master | hackupc/env/lib/python3.5/site-packages/django/contrib/auth/migrations/0003_alter_user_email_max_length.py | 586 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Widens auth.User.email from Django's old default length to 254
    # characters (the maximum length of a valid email address).
    dependencies = [
        ('auth', '0002_alter_permission_name_max_length'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
        ),
    ]
|
navycrow/Sick-Beard | refs/heads/development | lib/hachoir_metadata/program.py | 90 | from lib.hachoir_metadata.metadata import RootMetadata, registerExtractor
from lib.hachoir_parser.program import ExeFile
from lib.hachoir_metadata.safe import fault_tolerant, getValue
class ExeMetadata(RootMetadata):
    # Metadata extractor for Windows executables, handling both PE
    # ("portable executable") and NE ("new-style executable") formats.
    # Maps VERSIONINFO string keys to metadata attribute names.
    KEY_TO_ATTR = {
        u"ProductName": "title",
        u"LegalCopyright": "copyright",
        u"LegalTrademarks": "copyright",
        u"LegalTrademarks1": "copyright",
        u"LegalTrademarks2": "copyright",
        u"CompanyName": "author",
        u"BuildDate": "creation_date",
        u"FileDescription": "title",
        u"ProductVersion": "version",
    }
    # VERSIONINFO keys deliberately ignored (carry no useful metadata here).
    SKIP_KEY = set((u"InternalName", u"OriginalFilename", u"FileVersion", u"BuildVersion"))
    def extract(self, exe):
        # Entry point: pick the extraction path based on executable format.
        if exe.isPE():
            self.extractPE(exe)
        elif exe.isNE():
            self.extractNE(exe)
    def extractNE(self, exe):
        if "ne_header" in exe:
            self.useNE_Header(exe["ne_header"])
        if "info" in exe:
            self.useNEInfo(exe["info"])
    @fault_tolerant
    def useNEInfo(self, info):
        # Scan the NE info nodes for the StringFileInfo block.
        for node in info.array("node"):
            if node["name"].value == "StringFileInfo":
                self.readVersionInfo(node["node[0]"])
    def extractPE(self, exe):
        # Read information from headers
        if "pe_header" in exe:
            self.usePE_Header(exe["pe_header"])
        if "pe_opt_header" in exe:
            self.usePE_OptHeader(exe["pe_opt_header"])
        # Use PE resource
        resource = exe.getResource()
        if resource and "version_info/node[0]" in resource:
            for node in resource.array("version_info/node[0]/node"):
                if getValue(node, "name") == "StringFileInfo" \
                and "node[0]" in node:
                    self.readVersionInfo(node["node[0]"])
    @fault_tolerant
    def useNE_Header(self, hdr):
        if hdr["is_dll"].value:
            self.format_version = u"New-style executable: Dynamic-link library (DLL)"
        elif hdr["is_win_app"].value:
            self.format_version = u"New-style executable: Windows 3.x application"
        else:
            self.format_version = u"New-style executable for Windows 3.x"
    @fault_tolerant
    def usePE_Header(self, hdr):
        self.creation_date = hdr["creation_date"].value
        self.comment = "CPU: %s" % hdr["cpu"].display
        if hdr["is_dll"].value:
            self.format_version = u"Portable Executable: Dynamic-link library (DLL)"
        else:
            self.format_version = u"Portable Executable: Windows application"
    @fault_tolerant
    def usePE_OptHeader(self, hdr):
        self.comment = "Subsystem: %s" % hdr["subsystem"].display
    def readVersionInfo(self, info):
        # Collect non-empty key/value pairs from a StringFileInfo block,
        # then map them onto metadata attributes via KEY_TO_ATTR.
        values = {}
        for node in info.array("node"):
            if "value" not in node or "name" not in node:
                continue
            value = node["value"].value.strip(" \0")
            if not value:
                continue
            key = node["name"].value
            values[key] = value
        if "ProductName" in values and "FileDescription" in values:
            # Make sure that FileDescription is set before ProductName
            # as title value
            # (assumes metadata attributes accumulate multiple values in
            # assignment order — TODO confirm against hachoir Metadata)
            self.title = values["FileDescription"]
            self.title = values["ProductName"]
            del values["FileDescription"]
            del values["ProductName"]
        for key, value in values.iteritems():
            if key in self.KEY_TO_ATTR:
                setattr(self, self.KEY_TO_ATTR[key], value)
            elif key not in self.SKIP_KEY:
                # Unknown keys are preserved as free-form comments.
                self.comment = "%s=%s" % (key, value)
registerExtractor(ExeFile, ExeMetadata)
|
kant/inasafe | refs/heads/develop | safe/impact_functions/inundation/flood_vector_building_impact/test/__init__.py | 20 | __author__ = 'lucernae'
__project_name__ = 'inasafe'
__filename__ = '__init__.py'
__date__ = '18/03/15'
__copyright__ = 'lana.pcfre@gmail.com'
|
Imaginashion/cloud-vision | refs/heads/master | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/wheel/util.py | 345 | """Utility functions."""
import sys
import os
import base64
import json
import hashlib
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8',
'to_json', 'from_json', 'matches_requirement']
def urlsafe_b64encode(data):
    """urlsafe_b64encode without padding"""
    encoded = base64.urlsafe_b64encode(data)
    return encoded.rstrip(binary('='))
def urlsafe_b64decode(data):
    """urlsafe_b64decode without padding"""
    padding = b'=' * (4 - (len(data) & 3))
    return base64.urlsafe_b64decode(data + padding)
def to_json(o):
    """Serialize *o* to a JSON string with deterministically sorted keys."""
    return json.dumps(o, sort_keys=True)
def from_json(j):
    """Deserialize a JSON string back into Python objects."""
    return json.loads(j)
def open_for_csv(name, mode):
    """Open *name* for use with the csv module on either Python version.

    Python 2 wants binary mode; Python 3 wants newline='' so the csv module
    controls line endings itself.
    """
    if sys.version_info[0] < 3:
        extra_kwargs = {}
        binary_flag = 'b'  # renamed: previously shadowed the builtin 'bin'
    else:
        extra_kwargs = {'newline': ''}
        binary_flag = ''
    return open(name, mode + binary_flag, **extra_kwargs)
try:
    # Python 2: the text type is ``unicode``.
    unicode
    def utf8(data):
        """Encode text to UTF-8 bytes; pass byte strings through untouched."""
        return data.encode('utf-8') if isinstance(data, unicode) else data
except NameError:
    # Python 3: the text type is ``str``.
    def utf8(data):
        """Encode text to UTF-8 bytes; pass byte strings through untouched."""
        return data.encode('utf-8') if isinstance(data, str) else data
try:
    # For encoding ascii back and forth between bytestrings, as is repeatedly
    # necessary in JSON-based crypto under Python 3
    unicode
    def native(s):
        """Python 2: str is already bytes; everything passes through."""
        return s
    def binary(s):
        """Python 2: encode unicode to ascii bytes, pass bytes through."""
        if isinstance(s, unicode):
            return s.encode('ascii')
        return s
except NameError:
    def native(s):
        """Python 3: decode ascii bytes to str, pass str through."""
        if isinstance(s, bytes):
            return s.decode('ascii')
        return s
    def binary(s):
        """Python 3: encode str to ascii bytes, pass bytes through."""
        if isinstance(s, str):
            return s.encode('ascii')
        # Bug fix: this branch previously fell through and returned None for
        # bytes input; byte strings must pass through unchanged, matching
        # the Python 2 implementation above.
        return s
class HashingFile(object):
    """File-object wrapper that hashes and counts every byte written
    through it while forwarding the data to the underlying file."""
    def __init__(self, fd, hashtype='sha256'):
        self.fd = fd
        self.hashtype = hashtype
        self.hash = hashlib.new(hashtype)
        self.length = 0
    def write(self, data):
        # Update digest and running length before delegating the write.
        self.hash.update(data)
        self.length += len(data)
        self.fd.write(data)
    def close(self):
        self.fd.close()
    def digest(self):
        # md5 keeps the legacy hex form; other algorithms use the
        # "<alg>=<urlsafe-b64>" RECORD format.
        if self.hashtype == 'md5':
            return self.hash.hexdigest()
        raw = self.hash.digest()
        return self.hashtype + '=' + native(urlsafe_b64encode(raw))
class OrderedDefaultDict(OrderedDict):
    """OrderedDict with defaultdict-style auto-creation of missing values."""
    def __init__(self, *args, **kwargs):
        if not args:
            self.default_factory = None
        else:
            factory = args[0]
            if not (factory is None or callable(factory)):
                raise TypeError('first argument must be callable or None')
            self.default_factory = factory
            args = args[1:]
        super(OrderedDefaultDict, self).__init__(*args, **kwargs)
    def __missing__(self, key):
        # Without a factory, behave like a plain dict lookup failure.
        if self.default_factory is None:
            raise KeyError(key)
        value = self.default_factory()
        self[key] = value
        return value
# Platform-specific config-path helpers: Windows resolves special folders via
# the shell32 API; other platforms delegate to xdg.BaseDirectory.
if sys.platform == 'win32':
    import ctypes.wintypes
    # CSIDL_APPDATA for reference - not used here for compatibility with
    # dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order
    csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28,
                 CSIDL_COMMON_APPDATA=35)
    def get_path(name):
        # Resolve a CSIDL folder name to its filesystem path via shell32.
        SHGFP_TYPE_CURRENT = 0
        buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
        ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf)
        return buf.value
    def save_config_path(*resource):
        # Writable config dir under the user's local app-data, created on demand.
        appdata = get_path("CSIDL_LOCAL_APPDATA")
        path = os.path.join(appdata, *resource)
        if not os.path.isdir(path):
            os.makedirs(path)
        return path
    def load_config_paths(*resource):
        # Yield existing config dirs, user-local first, then machine-wide.
        ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"]
        for id in ids:
            base = get_path(id)
            path = os.path.join(base, *resource)
            if os.path.exists(path):
                yield path
else:
    # Import xdg lazily inside the functions so the module imports even when
    # pyxdg is not installed and these helpers are never called.
    def save_config_path(*resource):
        import xdg.BaseDirectory
        return xdg.BaseDirectory.save_config_path(*resource)
    def load_config_paths(*resource):
        import xdg.BaseDirectory
        return xdg.BaseDirectory.load_config_paths(*resource)
def matches_requirement(req, wheels):
    """List of wheels matching a requirement.

    :param req: The requirement to satisfy
    :param wheels: List of wheels to search.
    """
    try:
        from pkg_resources import Distribution, Requirement
    except ImportError:
        raise RuntimeError("Cannot use requirements without pkg_resources")
    requirement = Requirement.parse(req)
    matching = []
    for wheel_file in wheels:
        parsed = wheel_file.parsed_filename
        dist = Distribution(project_name=parsed.group("name"),
                            version=parsed.group("ver"))
        if dist in requirement:
            matching.append(wheel_file)
    return matching
|
cmvelo/ansible-modules-extras | refs/heads/devel | cloud/amazon/route53_health_check.py | 10 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: route53_health_check
short_description: add or delete health-checks in Amazons Route53 DNS service
description:
- Creates and deletes DNS Health checks in Amazons Route53 service
- Only the port, resource_path, string_match and request_interval are
considered when updating existing health-checks.
version_added: "2.0"
options:
state:
description:
- Specifies the action to take.
required: true
choices: [ 'present', 'absent' ]
ip_address:
description:
- IP address of the end-point to check. Either this or `fqdn` has to be
provided.
required: false
default: null
port:
description:
- The port on the endpoint on which you want Amazon Route 53 to perform
health checks. Required for TCP checks.
required: false
default: null
type:
description:
- The type of health check that you want to create, which indicates how
Amazon Route 53 determines whether an endpoint is healthy.
required: true
choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
resource_path:
description:
- The path that you want Amazon Route 53 to request when performing
health checks. The path can be any value for which your endpoint will
return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
for example the file /docs/route53-health-check.html.
- Required for all checks except TCP.
- The path must begin with a /
- Maximum 255 characters.
required: false
default: null
fqdn:
description:
- Domain name of the endpoint to check. Either this or `ip_address` has
to be provided. When both are given the `fqdn` is used in the `Host:`
header of the HTTP request.
required: false
string_match:
description:
- If the check type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string
that you want Amazon Route 53 to search for in the response body from
the specified resource. If the string appears in the first 5120 bytes
of the response body, Amazon Route 53 considers the resource healthy.
required: false
default: null
request_interval:
description:
- The number of seconds between the time that Amazon Route 53 gets a
response from your endpoint and the time that it sends the next
health-check request.
required: true
default: 30
choices: [ 10, 30 ]
failure_threshold:
description:
- The number of consecutive health checks that an endpoint must pass or
fail for Amazon Route 53 to change the current status of the endpoint
from unhealthy to healthy or vice versa.
required: true
default: 3
choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
author: "zimbatm (@zimbatm)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Create a health-check for host1.example.com and use it in record
- route53_health_check:
state: present
fqdn: host1.example.com
type: HTTP_STR_MATCH
resource_path: /
string_match: "Hello"
request_interval: 10
failure_threshold: 2
record: my_health_check
- route53:
action: create
zone: "example.com"
type: CNAME
record: "www.example.com"
value: host1.example.com
ttl: 30
# Routing policy
identifier: "host1@www"
weight: 100
health_check: "{{ my_health_check.health_check.id }}"
# Delete health-check
- route53_health_check:
state: absent
fqdn: host1.example.com
'''
import time
import uuid
try:
import boto
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection, exception
from boto.route53.healthcheck import HealthCheck
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# Things that can't get changed:
# protocol
# ip_address or domain
# request_interval
# string_match if not previously enabled
def find_health_check(conn, wanted):
    """Searches for health checks that have the exact same set of immutable values"""
    # The immutable fields are endpoint (IP or FQDN), check type and interval;
    # the first exact match wins.
    for check in conn.get_list_health_checks().HealthChecks:
        config = check.HealthCheckConfig
        if (config.get('IPAddress') == wanted.ip_addr and
                config.get('FullyQualifiedDomainName') == wanted.fqdn and
                config.get('Type') == wanted.hc_type and
                config.get('RequestInterval') == str(wanted.request_interval)):
            return check
    return None
def to_health_check(config):
    """Build a boto HealthCheck object from a Route53 HealthCheckConfig dict."""
    interval = int(config.get('RequestInterval'))
    threshold = int(config.get('FailureThreshold'))
    return HealthCheck(config.get('IPAddress'),
                       config.get('Port'),
                       config.get('Type'),
                       config.get('ResourcePath'),
                       fqdn=config.get('FullyQualifiedDomainName'),
                       string_match=config.get('SearchString'),
                       request_interval=interval,
                       failure_threshold=threshold)
def health_check_diff(a, b):
    """Return the attributes that differ between *a* and *b*, valued from *b*
    (empty dict when the objects are attribute-equal)."""
    attrs_a = a.__dict__
    attrs_b = b.__dict__
    if attrs_a == attrs_b:
        return {}
    changed = {}
    for key in set(attrs_a.keys()) | set(attrs_b.keys()):
        if attrs_a.get(key) != attrs_b.get(key):
            changed[key] = attrs_b.get(key)
    return changed
def to_template_params(health_check):
    """Map a HealthCheck to the substitution dict used by the XML body
    templates; optional XML fragments default to the empty string."""
    params = {
        'ip_addr_part': '',
        'port': health_check.port,
        'type': health_check.hc_type,
        'resource_path_part': '',
        'fqdn_part': '',
        'string_match_part': '',
        'request_interval': health_check.request_interval,
        'failure_threshold': health_check.failure_threshold,
    }
    # (params key, value, XML template, template field) for optional parts.
    optional_parts = (
        ('ip_addr_part', health_check.ip_addr,
         HealthCheck.XMLIpAddrPart, 'ip_addr'),
        ('resource_path_part', health_check.resource_path,
         XMLResourcePathPart, 'resource_path'),
        ('fqdn_part', health_check.fqdn,
         HealthCheck.XMLFQDNPart, 'fqdn'),
        ('string_match_part', health_check.string_match,
         HealthCheck.XMLStringMatchPart, 'string_match'),
    )
    for key, value, template, field in optional_parts:
        if value:
            params[key] = template % {field: value}
    return params
XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
POSTXMLBody = """
<CreateHealthCheckRequest xmlns="%(xmlns)s">
<CallerReference>%(caller_ref)s</CallerReference>
<HealthCheckConfig>
%(ip_addr_part)s
<Port>%(port)s</Port>
<Type>%(type)s</Type>
%(resource_path_part)s
%(fqdn_part)s
%(string_match_part)s
<RequestInterval>%(request_interval)s</RequestInterval>
<FailureThreshold>%(failure_threshold)s</FailureThreshold>
</HealthCheckConfig>
</CreateHealthCheckRequest>
"""
UPDATEHCXMLBody = """
<UpdateHealthCheckRequest xmlns="%(xmlns)s">
<HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
%(ip_addr_part)s
<Port>%(port)s</Port>
%(resource_path_part)s
%(fqdn_part)s
%(string_match_part)s
<FailureThreshold>%(failure_threshold)i</FailureThreshold>
</UpdateHealthCheckRequest>
"""
def create_health_check(conn, health_check, caller_ref=None):
    """POST a new Route53 health check; return the parsed XML response or
    raise DNSServerError on any non-201 status."""
    if caller_ref is None:
        # Route53 requires a unique caller reference per create request.
        caller_ref = str(uuid.uuid4())
    uri = '/%s/healthcheck' % conn.Version
    params = to_template_params(health_check)
    params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)
    xml_body = POSTXMLBody % params
    response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
    body = response.read()
    boto.log.debug(body)
    if response.status != 201:
        raise exception.DNSServerError(response.status, response.reason, body)
    element = boto.jsonresponse.Element()
    handler = boto.jsonresponse.XmlHandler(element, None)
    handler.parse(body)
    return element
def update_health_check(conn, health_check_id, health_check_version, health_check):
    """POST an update for an existing health check and return the parsed
    XML response; raises DNSServerError on non-200/204 statuses."""
    uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
    params = to_template_params(health_check)
    params.update(xmlns=conn.XMLNameSpace,
                  health_check_version=health_check_version)
    xml_body = UPDATEHCXMLBody % params
    response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
    body = response.read()
    boto.log.debug(body)
    if response.status not in (200, 204):
        raise exception.DNSServerError(response.status,
                                       response.reason,
                                       body)
    element = boto.jsonresponse.Element()
    handler = boto.jsonresponse.XmlHandler(element, None)
    handler.parse(body)
    return element
def main():
    """Ansible module entry point: ensure a Route 53 health check is
    present (create/update) or absent (delete).

    Exits via module.exit_json / module.fail_json; never returns normally.
    NOTE: this file uses Python 2 syntax (``except ..., e``).
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            state = dict(choices=['present', 'absent'], default='present'),
            ip_address = dict(),
            port = dict(type='int'),
            type = dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
            resource_path = dict(),
            fqdn = dict(),
            string_match = dict(),
            request_interval = dict(type='int', choices=[10, 30], default=30),
            failure_threshold = dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto 2.27.0+ required for this module')
    state_in = module.params.get('state')
    ip_addr_in = module.params.get('ip_address')
    port_in = module.params.get('port')
    type_in = module.params.get('type')
    resource_path_in = module.params.get('resource_path')
    fqdn_in = module.params.get('fqdn')
    string_match_in = module.params.get('string_match')
    request_interval_in = module.params.get('request_interval')
    failure_threshold_in = module.params.get('failure_threshold')
    # At least one endpoint identifier is required.
    if ip_addr_in is None and fqdn_in is None:
        module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
    # Default port
    if port_in is None:
        if type_in in ['HTTP', 'HTTP_STR_MATCH']:
            port_in = 80
        elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
            port_in = 443
        else:
            module.fail_json(msg="parameter 'port' is required for 'type' TCP")
    # string_match in relation with type
    if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
        if string_match_in is None:
            module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types")
        elif len(string_match_in) > 255:
            module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
    elif string_match_in:
        module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    # connect to the route53 endpoint
    try:
        conn = Route53Connection(**aws_connect_kwargs)
    except boto.exception.BotoServerError, e:
        module.fail_json(msg = e.error_message)
    changed = False
    action = None
    check_id = None
    wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in)
    existing_check = find_health_check(conn, wanted_config)
    if existing_check:
        check_id = existing_check.Id
        existing_config = to_health_check(existing_check.HealthCheckConfig)
    if state_in == 'present':
        if existing_check is None:
            action = "create"
            check_id = create_health_check(conn, wanted_config).HealthCheck.Id
            changed = True
        else:
            diff = health_check_diff(existing_config, wanted_config)
            # NOTE(review): issuing the update when the diff is empty/falsy
            # looks inverted -- confirm health_check_diff()'s return
            # convention (it is defined outside this chunk) before relying
            # on this branch.
            if not diff:
                action = "update"
                update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config)
                changed = True
    elif state_in == 'absent':
        if check_id:
            action = "delete"
            conn.delete_health_check(check_id)
            changed = True
    else:
        module.fail_json(msg = "Logic Error: Unknown state")
    module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
# import module snippets
# Ansible convention: these star-imports inject AnsibleModule plus the
# ec2_argument_spec/get_aws_connection_info helpers referenced in main().
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Module entry point -- Ansible executes this file as a plain script.
main()
|
fabianlee/blogcode | refs/heads/master | py-zabbix/ZabbixGetMetrics.py | 1 | #!/usr/bin/python
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import time
from zabbix.api import ZabbixAPI
# argument check
if len(sys.argv)<5:
print "USAGE: zabbixURL user pass host itemName"
print "EXAMPLE: http://127.0.0.1/zabbix Admin zabbix myhost mystr1"
sys.exit(1)
# simple parse for arguments
url = sys.argv[1]
user = sys.argv[2]
password = sys.argv[3]
host = sys.argv[4]
key = sys.argv[5]
print "Going to connect to {} as {}, and retrieve from host {} the item {}".format(url,user,host,key)
# Create ZabbixAPI class instance
use_older_authenticate_method = False
zapi = ZabbixAPI(url, use_older_authenticate_method, user, password)
zversion = zapi.do_request('apiinfo.version')
print "Zabbix API version: {}".format(zversion['result'])
# https://www.zabbix.com/documentation/2.2/manual/api/reference/host/get
# Get specified host
print "----------------------------"
thehost = zapi.do_request('host.get',
{
'filter': {'host': host},
'selectItems' : 'extend',
'output': 'extend'
})
if len(thehost['result'])<1:
print "HALTING. There was no host defined in zabbix with id: {}".format(host)
sys.exit(2)
hostId = thehost['result'][0]['hostid']
print "Found host {} with id {}".format(host,hostId)
# now look for item within that host
itemId = None
for item in thehost['result'][0]['items']:
# for debugging
#print "item[{}] -> {}".format(item['itemid'],item['key_'])
# if match, then get out int id and type (0=float,1=char,3=unsign,4=text)
if item['key_'] == key:
itemId = item['itemid']
itemType = item['value_type']
if itemId is None:
print "HALTING. There was no item defined on host {} with name: {}".format(host,key)
sys.exit(2)
print "Found item {} on host {} with item id/type {}/{}".format(key,host,itemId,itemType)
# https://www.zabbix.com/documentation/2.2/manual/api/reference/history/get
print "----------------------------"
history = zapi.do_request('history.get',
{
'history': itemType,
'filter': {'host': host, 'itemid': itemId},
'limit': '5',
'sortfield': 'clock',
'sortorder': 'DESC',
'output': 'extend'
})
# show history rows
print "Retrieved {} rows of history".format(len(history['result']))
for hist in history['result']:
# convert epoch to human readable format
timestr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(long(hist['clock'])))
print "{} @ {}".format(timestr,hist['value'])
|
grap/OCB | refs/heads/7.0 | openerp/tests/addons/test_uninstall/models.py | 94 | # -*- coding: utf-8 -*-
import openerp
from openerp.osv import fields
from openerp.osv.orm import Model
class test_uninstall_model(Model):
    """
    This model uses different types of columns to make it possible to test
    the uninstall feature of OpenERP.
    """
    _name = 'test_uninstall.model'
    # One column of each relational flavour, so uninstalling must clean up a
    # plain field, a many2one foreign key, and a many2many relation table.
    _columns = {
        'name': fields.char('Name', size=64),
        'ref': fields.many2one('res.users', string='User'),
        'rel': fields.many2many('res.users', string='Users'),
    }
    # The backing SQL constraint must also be dropped on uninstall.
    _sql_constraints = [
        ('name_uniq', 'unique (name)', 'Each name must be unique.')
    ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jkern/bigcouch | refs/heads/master | couchjs/scons/scons-local-2.0.1/SCons/Tool/mslink.py | 61 | """SCons.Tool.mslink
Tool-specific initialization for the Microsoft linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mslink.py 5134 2010/08/16 23:02:40 bdeegan"
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvc
import SCons.Tool.msvs
import SCons.Util
from MSCommon import msvc_setup_env_once, msvc_exists
def pdbGenerator(env, target, source, for_signature):
    """$_PDB generator: linker flags enabling PDB output, or None.

    Reads the .pdb node recorded on the first target's attributes (set by
    the emitters in this module); returns None when there is no target or
    no pdb attribute.
    """
    try:
        pdb_node = target[0].attributes.pdb
    except (AttributeError, IndexError):
        # Empty target list, or the target carries no .pdb attribute.
        return None
    return ['/PDB:%s' % pdb_node, '/DEBUG']
def _dllTargets(target, source, env, for_signature, paramtp):
listCmd = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
if dll: listCmd.append("/out:%s"%dll.get_string(for_signature))
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: listCmd.append("/implib:%s"%implib.get_string(for_signature))
return listCmd
def _dllSources(target, source, env, for_signature, paramtp):
listCmd = []
deffile = env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX")
for src in source:
# Check explicitly for a non-None deffile so that the __cmp__
# method of the base SCons.Util.Proxy class used for some Node
# proxies doesn't try to use a non-existent __dict__ attribute.
if deffile and src == deffile:
# Treat this source as a .def file.
listCmd.append("/def:%s" % src.get_string(for_signature))
else:
# Just treat it as a generic source file.
listCmd.append(src)
return listCmd
def windowsShlinkTargets(target, source, env, for_signature):
    """$_SHLINK_TARGETS generator: /out: and /implib: options for a DLL."""
    return _dllTargets(target, source, env, for_signature, 'SHLIB')
def windowsShlinkSources(target, source, env, for_signature):
    """$_SHLINK_SOURCES generator: sources with any .def file as /def:."""
    return _dllSources(target, source, env, for_signature, 'SHLIB')
def _windowsLdmodTargets(target, source, env, for_signature):
    """Get targets for loadable modules."""
    return _dllTargets(target, source, env, for_signature, 'LDMODULE')
def _windowsLdmodSources(target, source, env, for_signature):
    """Get sources for loadable modules."""
    return _dllSources(target, source, env, for_signature, 'LDMODULE')
def _dllEmitter(target, source, env, paramtp):
    """Common implementation of dll emitter.

    Validates the msvc construction variables, then augments the
    target/source lists with the side-effect files of a DLL link:
    an auto-generated .def source (when $WINDOWS_INSERT_DEF is set),
    a .manifest target (MSVC >= 8 with $WINDOWS_INSERT_MANIFEST),
    the .pdb target (when $PDB is set; also recorded on target[0]),
    and the import library plus .exp targets (unless no_import_lib).
    Returns the (targets, sources) tuple expected by SCons emitters.
    """
    SCons.Tool.msvc.validate_vars(env)
    extratargets = []
    extrasources = []
    dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
    no_import_lib = env.get('no_import_lib', 0)
    if not dll:
        raise SCons.Errors.UserError('A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp))
    insert_def = env.subst("$WINDOWS_INSERT_DEF")
    if not insert_def in ['', '0', 0] and \
       not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):
        # append a def file to the list of sources
        extrasources.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))
    version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
    if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
        # MSVC 8 automatically generates .manifest files that must be installed
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))
    if 'PDB' in env and env['PDB']:
        pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
        extratargets.append(pdb)
        # pdbGenerator reads this attribute back when composing $_PDB flags.
        target[0].attributes.pdb = pdb
    if not no_import_lib and \
       not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
        # Append an import library to the list of targets.
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "LIBPREFIX", "LIBSUFFIX"))
        # and .exp file is created if there are exports from a DLL
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))
    return (target+extratargets, source+extrasources)
def windowsLibEmitter(target, source, env):
    """SHLIBEMITTER: delegate to the common DLL emitter with SHLIB vars."""
    return _dllEmitter(target, source, env, 'SHLIB')
def ldmodEmitter(target, source, env):
    """Emitter for loadable modules.
    Loadable modules are identical to shared libraries on Windows, but building
    them is subject to different parameters (LDMODULE*).
    """
    return _dllEmitter(target, source, env, 'LDMODULE')
def prog_emitter(target, source, env):
    """PROGEMITTER: add .manifest (MSVC >= 8) and .pdb side-effect targets
    for an executable, recording the pdb node on the primary target.
    """
    SCons.Tool.msvc.validate_vars(env)
    extratargets = []
    exe = env.FindIxes(target, "PROGPREFIX", "PROGSUFFIX")
    if not exe:
        raise SCons.Errors.UserError("An executable should have exactly one target with the suffix: %s" % env.subst("$PROGSUFFIX"))
    version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
    if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
        # MSVC 8 automatically generates .manifest files that have to be installed
        extratargets.append(
            env.ReplaceIxes(exe,
                            "PROGPREFIX", "PROGSUFFIX",
                            "WINDOWSPROGMANIFESTPREFIX", "WINDOWSPROGMANIFESTSUFFIX"))
    if 'PDB' in env and env['PDB']:
        pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
        extratargets.append(pdb)
        # pdbGenerator reads this attribute back when composing $_PDB flags.
        target[0].attributes.pdb = pdb
    return (target+extratargets,source)
def RegServerFunc(target, source, env):
    """Register the built DLL with regsvr32 when env['register'] is set.

    Runs regServerAction on the first target/source pair; raises
    SCons.Errors.UserError when regsvr32 reports failure, otherwise prints
    a confirmation and returns its (zero) exit status.  When registration
    is not requested, returns 0 without doing anything.
    """
    if 'register' in env and env['register']:
        ret = regServerAction([target[0]], [source[0]], env)
        if ret:
            raise SCons.Errors.UserError("Unable to register %s" % target[0])
        else:
            # Fixes the 'sucessfully' typo; the parenthesized print form is
            # valid in both Python 2 and 3 for a single argument.
            print("Registered %s successfully" % target[0])
        return ret
    return 0
# Action that invokes regsvr32 ($REGSVRCOM) on the built DLL.
regServerAction = SCons.Action.Action("$REGSVRCOM", "$REGSVRCOMSTR")
# Wraps RegServerFunc so registration only runs when env['register'] is set.
regServerCheck = SCons.Action.Action(RegServerFunc, None)
# Link actions for shared libs / loadable modules; the composite variants
# append the optional regsvr32 registration step after the link.
shlibLinkAction = SCons.Action.Action('${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES")}')
compositeShLinkAction = shlibLinkAction + regServerCheck
ldmodLinkAction = SCons.Action.Action('${TEMPFILE("$LDMODULE $LDMODULEFLAGS $_LDMODULE_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_LDMODULE_SOURCES")}')
compositeLdmodAction = ldmodLinkAction + regServerCheck
def generate(env):
    """Add Builders and construction variables for mslink to an Environment.

    (The original docstring said "for ar" -- a copy/paste from another
    tool; this module configures the Microsoft linker.)
    """
    SCons.Tool.createSharedLibBuilder(env)
    SCons.Tool.createProgBuilder(env)
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS /dll')
    env['_SHLINK_TARGETS'] = windowsShlinkTargets
    env['_SHLINK_SOURCES'] = windowsShlinkSources
    env['SHLINKCOM'] = compositeShLinkAction
    env.Append(SHLIBEMITTER = [windowsLibEmitter])
    env['LINK'] = 'link'
    env['LINKFLAGS'] = SCons.Util.CLVar('/nologo')
    env['_PDB'] = pdbGenerator
    env['LINKCOM'] = '${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows")}'
    env.Append(PROGEMITTER = [prog_emitter])
    env['LIBDIRPREFIX']='/LIBPATH:'
    env['LIBDIRSUFFIX']=''
    env['LIBLINKPREFIX']=''
    env['LIBLINKSUFFIX']='$LIBSUFFIX'
    # WIN32* variables are kept for backward compatibility; the WINDOWS*
    # names below simply alias them.
    env['WIN32DEFPREFIX'] = ''
    env['WIN32DEFSUFFIX'] = '.def'
    env['WIN32_INSERT_DEF'] = 0
    env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}'
    env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}'
    env['WINDOWS_INSERT_DEF'] = '${WIN32_INSERT_DEF}'
    env['WIN32EXPPREFIX'] = ''
    env['WIN32EXPSUFFIX'] = '.exp'
    env['WINDOWSEXPPREFIX'] = '${WIN32EXPPREFIX}'
    env['WINDOWSEXPSUFFIX'] = '${WIN32EXPSUFFIX}'
    env['WINDOWSSHLIBMANIFESTPREFIX'] = ''
    env['WINDOWSSHLIBMANIFESTSUFFIX'] = '${SHLIBSUFFIX}.manifest'
    env['WINDOWSPROGMANIFESTPREFIX'] = ''
    env['WINDOWSPROGMANIFESTSUFFIX'] = '${PROGSUFFIX}.manifest'
    env['REGSVRACTION'] = regServerCheck
    env['REGSVR'] = os.path.join(SCons.Platform.win32.get_system_root(),'System32','regsvr32')
    env['REGSVRFLAGS'] = '/s '
    env['REGSVRCOM'] = '$REGSVR $REGSVRFLAGS ${TARGET.windows}'
    # Set-up ms tools paths
    msvc_setup_env_once(env)
    # Loadable modules are on Windows the same as shared libraries, but they
    # are subject to different build parameters (LDMODULE* variables).
    # Therefore LDMODULE* variables correspond as much as possible to
    # SHLINK*/SHLIB* ones.
    SCons.Tool.createLoadableModuleBuilder(env)
    env['LDMODULE'] = '$SHLINK'
    env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
    env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
    env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
    env['_LDMODULE_TARGETS'] = _windowsLdmodTargets
    env['_LDMODULE_SOURCES'] = _windowsLdmodSources
    env['LDMODULEEMITTER'] = [ldmodEmitter]
    env['LDMODULECOM'] = compositeLdmodAction
def exists(env):
    """Return true when an MSVC toolchain is available (via MSCommon)."""
    return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
lampsbr/grimorio | refs/heads/master | vendor/psy/psysh/test/tools/vis.py | 710 | """
vis.py
======
Ctypes based module to access libbsd's strvis & strunvis functions.
The `vis` function is the equivalent of strvis.
The `unvis` function is the equivalent of strunvis.
All functions accept unicode string as input and return a unicode string.
Constants:
----------
* to select alternate encoding format
`VIS_OCTAL`: use octal \ddd format
    `VIS_CSTYLE`: use \[nrft0..] where appropriate
* to alter set of characters encoded
(default is to encode all non-graphic except space, tab, and newline).
`VIS_SP`: also encode space
`VIS_TAB`: also encode tab
`VIS_NL`: also encode newline
`VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL)
`VIS_SAFE`: only encode "unsafe" characters
* other
`VIS_NOSLASH`: inhibit printing '\'
`VIS_HTTP1808`: http-style escape % hex hex
`VIS_HTTPSTYLE`: http-style escape % hex hex
`VIS_MIMESTYLE`: mime-style escape = HEX HEX
`VIS_HTTP1866`: http-style &#num; or &string;
`VIS_NOESCAPE`: don't decode `\'
`VIS_GLOB`: encode glob(3) magic characters
:Authors:
- ju1ius (http://github.com/ju1ius)
:Version: 1
:Date: 2014-01-05
"""
from ctypes import CDLL, c_char_p, c_int, create_string_buffer
from ctypes.util import find_library
__all__ = [
    'vis', 'unvis',
    'VIS_OCTAL', 'VIS_CSTYLE',
    'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE',
    'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE',
    'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB'
]
#############################################################
# Constants from bsd/vis.h
#############################################################
#to select alternate encoding format
VIS_OCTAL = 0x0001
VIS_CSTYLE = 0x0002
# to alter set of characters encoded
# (default is to encode all non-graphic except space, tab, and newline).
VIS_SP = 0x0004
VIS_TAB = 0x0008
VIS_NL = 0x0010
VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL
VIS_SAFE = 0x0020
# other
VIS_NOSLASH = 0x0040
# NOTE: HTTP1808 and HTTPSTYLE deliberately share the same bit value,
# mirroring the aliases in bsd/vis.h.
VIS_HTTP1808 = 0x0080
VIS_HTTPSTYLE = 0x0080
VIS_MIMESTYLE = 0x0100
VIS_HTTP1866 = 0x0200
VIS_NOESCAPE = 0x0400
VIS_GLOB = 0x1000
#############################################################
# Import libbsd/vis functions
#############################################################
_libbsd = CDLL(find_library('bsd'))
_strvis = _libbsd.strvis
_strvis.argtypes = [c_char_p, c_char_p, c_int]
_strvis.restype = c_int
_strunvis = _libbsd.strunvis
# BUGFIX: the original assigned _strvis.argtypes/restype a second time here
# (copy/paste), which both left _strunvis without a prototype and clobbered
# _strvis's correct 3-argument signature with a 2-argument one.
_strunvis.argtypes = [c_char_p, c_char_p]
_strunvis.restype = c_int
def vis(src, flags=VIS_WHITE):
    """
    Encodes the string `src` into libbsd's vis encoding.
    `flags` must be one of the VIS_* constants

    C definition:
        int strvis(char *dst, char *src, int flags);
    """
    src_bytes = bytes(src, 'utf-8')
    # strvis writes into dst, so it needs a mutable ctypes buffer; the
    # original passed a c_char_p over an immutable Python bytes object,
    # which the ctypes docs forbid (undefined behaviour).  Worst case each
    # input byte expands to 4 output bytes, plus 1 for the NUL terminator.
    dst = create_string_buffer(len(src_bytes) * 4 + 1)
    bytes_written = _strvis(dst, c_char_p(src_bytes), c_int(flags))
    if -1 == bytes_written:
        raise RuntimeError('vis failed to encode string "{}"'.format(src))
    # .value stops at the NUL terminator written by strvis.
    return dst.value.decode('utf-8')
def unvis(src):
    """
    Decodes a string encoded by vis.

    C definition:
        int strunvis(char *dst, char *src);
    """
    src_bytes = bytes(src, 'utf-8')
    # strunvis writes into dst, so use a mutable ctypes buffer (the original
    # passed an immutable bytes object -- undefined behaviour).  The decoded
    # text is never longer than the input; +1 leaves room for the NUL
    # terminator, which the original len(src) buffer did not.
    dst = create_string_buffer(len(src_bytes) + 1)
    bytes_written = _strunvis(dst, c_char_p(src_bytes))
    if -1 == bytes_written:
        raise RuntimeError('unvis failed to decode string "{}"'.format(src))
    return dst.value.decode('utf-8')
|
gaoning777/YCSB-C-DTranx | refs/heads/master | Test/UnitTest/gtest/test/gtest_shuffle_test.py | 3023 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
# Filter used to produce the *FILTERED_TESTS lists below.
TEST_FILTER = 'A*.A:A*.B:C*'
# Module-level caches, populated lazily (exactly once) by
# CalculateTestLists() and shared by every test method.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that makes disabled tests run too."""
  return '--gtest_also_run_disabled_tests'


def FilterFlag(test_filter):
  """Returns a --gtest_filter flag selecting tests matching test_filter."""
  return '--gtest_filter=' + str(test_filter)


def RepeatFlag(n):
  """Returns a --gtest_repeat flag requesting n iterations."""
  return '--gtest_repeat={0}'.format(n)


def ShuffleFlag():
  """Returns the flag that turns on test shuffling."""
  return '--gtest_shuffle'


def RandomSeedFlag(n):
  """Returns a --gtest_random_seed flag with seed n."""
  return '--gtest_random_seed={0}'.format(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program with extra_env layered over os.environ and
  returns its captured output."""
  merged_env = dict(os.environ)
  merged_env.update(extra_env)
  return gtest_test_utils.Subprocess([COMMAND] + args, env=merged_env).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and groups its reported tests by iteration.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list whose i-th element is the list of tests run in the i-th
    test iteration.
  """
  iterations = []
  for raw_line in RunAndReturnOutput(extra_env, args).split('\n'):
    if raw_line.startswith('----'):
      # A dashed separator marks the start of a new iteration.
      current = []
      iterations.append(current)
    elif raw_line.strip():
      current.append(raw_line.strip())  # 'TestCaseName.TestName'
  return iterations
def GetTestCases(tests):
  """Returns the test case names appearing in the given full test names.

  Args:
    tests: a list of 'TestCaseName.TestName' strings

  Returns:
    The distinct test case names, in order of first appearance (every
    duplicate after the first occurrence is dropped).
  """
  seen = []
  for full_name in tests:
    case_name = full_name.split('.')[0]
    if case_name not in seen:
      seen.append(case_name)
  return seen
def CalculateTestLists():
  """Calculates the list of tests run under different flags.

  Fills the module-level *_TESTS caches.  Each `if not X` guard makes the
  function idempotent, so the test binary is invoked at most once per
  configuration even though setUp() calls this before every test.
  """
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])
  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    # Populates the module-level *_TESTS caches (idempotent).
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    """Asserts that all tests of any given test case appear contiguously."""
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      # Record the start of each run of same-test-case tests.  BUGFIX: the
      # original condition was `if test_cases and ...`, which can never
      # append to the initially-empty list, so test_cases stayed empty and
      # the assertion below was unreachable -- the whole check was a no-op.
      if not test_cases or test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
  # Delegates to Google Test's Python test-runner wrapper.
  gtest_test_utils.Main()
|
Tendrl/commons | refs/heads/master | tendrl/commons/objects/node/flows/stop_services/__init__.py | 1 | from tendrl.commons import flows
from tendrl.commons.flows.exceptions import FlowExecutionFailedError
from tendrl.commons.utils import cmd_utils
from tendrl.commons.utils import log_utils as logger
class StopServices(flows.BaseFlow):
    """Flow that stops and disables each systemd service listed under
    parameters['Services[]'] on the local node via systemctl.
    """
    def __init__(self, *args, **kwargs):
        super(StopServices, self).__init__(*args, **kwargs)
    def run(self):
        # Run the base flow's bookkeeping first, then process each service.
        # Raises FlowExecutionFailedError only when a service's status
        # cannot be determined; systemctl failures are merely debug-logged.
        # Returns True when the loop completes.
        super(StopServices, self).run()
        services = self.parameters['Services[]']
        for service in services:
            logger.log(
                "info",
                NS.publisher_id,
                {
                    "message": "Stopping service %s on node %s" %
                    (service, NS.node_context.fqdn)
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
            )
            srv = NS.tendrl.objects.Service(service=service)
            if not srv.running:
                # Not running: distinguish "really stopped" from
                # "status probe failed".
                if len(srv.error) > 0:
                    raise FlowExecutionFailedError(
                        "Unable to check status of service %s "
                        "on %s. Error: %s" % (
                            service,
                            NS.node_context.node_id,
                            srv.error
                        )
                    )
                else:
                    logger.log(
                        "debug",
                        NS.publisher_id,
                        {
                            "message": "%s not running on "
                            "%s" % (service, NS.node_context.fqdn)
                        },
                        job_id=self.parameters['job_id'],
                        flow_id=self.parameters['flow_id'],
                    )
                    # NOTE(review): already-stopped services also skip the
                    # "systemctl disable" step below -- confirm intended.
                    continue
            _cmd_str = "systemctl stop %s" % service
            cmd = cmd_utils.Command(_cmd_str)
            _, err, _ = cmd.run()
            if err:
                # NOTE(review): a failed stop is only debug-logged and the
                # flow still disables the unit and returns True --
                # presumably best-effort by design; verify.
                logger.log(
                    "debug",
                    NS.publisher_id,
                    {
                        "message": "Could not stop %s"
                        " service on %s. Error: %s" % (
                            service,
                            NS.node_context.fqdn,
                            err
                        )
                    },
                    job_id=self.parameters['job_id'],
                    flow_id=self.parameters['flow_id'],
                )
            _cmd_str = "systemctl disable %s" % service
            cmd = cmd_utils.Command(_cmd_str)
            _, err, _ = cmd.run()
            if err:
                logger.log(
                    "debug",
                    NS.publisher_id,
                    {
                        "message": "Could not disable %s"
                        " service on %s. Error: %s" % (
                            service,
                            NS.node_context.fqdn,
                            err
                        )
                    },
                    job_id=self.parameters['job_id'],
                    flow_id=self.parameters['flow_id'],
                )
        return True
|
jiangzhuo/kbengine | refs/heads/master | kbe/src/lib/python/Lib/test/test_fnmatch.py | 173 | """Test cases for the fnmatch module."""
from test import support
import unittest
from fnmatch import fnmatch, fnmatchcase, translate, filter
class FnmatchTestCase(unittest.TestCase):
    """Behavioural tests for fnmatch() / fnmatchcase() pattern matching."""
    def check_match(self, filename, pattern, should_match=1, fn=fnmatch):
        # Helper: assert that `fn` matches (or does not match), with a
        # readable failure message either way.
        if should_match:
            self.assertTrue(fn(filename, pattern),
                         "expected %r to match pattern %r"
                         % (filename, pattern))
        else:
            self.assertTrue(not fn(filename, pattern),
                         "expected %r not to match pattern %r"
                         % (filename, pattern))
    def test_fnmatch(self):
        check = self.check_match
        check('abc', 'abc')
        check('abc', '?*?')
        check('abc', '???*')
        check('abc', '*???')
        check('abc', '???')
        check('abc', '*')
        check('abc', 'ab[cd]')
        check('abc', 'ab[!de]')
        check('abc', 'ab[de]', 0)
        check('a', '??', 0)
        check('a', 'b', 0)
        # these test that '\' is handled correctly in character sets;
        # see SF bug #409651
        check('\\', r'[\]')
        check('a', r'[!\]')
        check('\\', r'[!\]', 0)
        # test that filenames with newlines in them are handled correctly.
        # http://bugs.python.org/issue6665
        check('foo\nbar', 'foo*')
        check('foo\nbar\n', 'foo*')
        check('\nfoo', 'foo*', False)
        check('\n', '*')
    def test_mix_bytes_str(self):
        # Mixing str and bytes between name and pattern must raise.
        self.assertRaises(TypeError, fnmatch, 'test', b'*')
        self.assertRaises(TypeError, fnmatch, b'test', '*')
        self.assertRaises(TypeError, fnmatchcase, 'test', b'*')
        self.assertRaises(TypeError, fnmatchcase, b'test', '*')
    def test_fnmatchcase(self):
        # fnmatchcase() never normalizes case, so these must not match.
        check = self.check_match
        check('AbC', 'abc', 0, fnmatchcase)
        check('abc', 'AbC', 0, fnmatchcase)
    def test_bytes(self):
        self.check_match(b'test', b'te*')
        self.check_match(b'test\xff', b'te*\xff')
        self.check_match(b'foo\nbar', b'foo*')
class TranslateTestCase(unittest.TestCase):
def test_translate(self):
self.assertEqual(translate('*'), '.*\Z(?ms)')
self.assertEqual(translate('?'), '.\Z(?ms)')
self.assertEqual(translate('a?b*'), 'a.b.*\Z(?ms)')
self.assertEqual(translate('[abc]'), '[abc]\Z(?ms)')
self.assertEqual(translate('[]]'), '[]]\Z(?ms)')
self.assertEqual(translate('[!x]'), '[^x]\Z(?ms)')
self.assertEqual(translate('[^x]'), '[\\^x]\Z(?ms)')
self.assertEqual(translate('[x'), '\\[x\Z(?ms)')
class FilterTestCase(unittest.TestCase):
    """Checks fnmatch.filter()."""

    def test_filter(self):
        names = ['a', 'b']
        self.assertEqual(filter(names, 'a'), ['a'])
def test_main():
    """Run every fnmatch test case through regrtest's runner."""
    test_cases = (FnmatchTestCase,
                  TranslateTestCase,
                  FilterTestCase)
    support.run_unittest(*test_cases)


if __name__ == "__main__":
    test_main()
|
JimCircadian/ansible | refs/heads/devel | lib/ansible/modules/windows/win_toast.py | 24 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_toast
version_added: "2.4"
short_description: Sends Toast windows notification to logged in users on Windows 10 or later hosts
description:
- Sends alerts which appear in the Action Center area of the windows desktop.
options:
expire:
description:
- How long in seconds before the notification expires.
default: 45
group:
description:
- Which notification group to add the notification to.
default: Powershell
msg:
description:
- The message to appear inside the notification.
- May include \n to format the message to appear within the Action Center.
default: Hello, World!
popup:
description:
- If C(no), the notification will not pop up and will only appear in the Action Center.
type: bool
default: yes
tag:
description:
- The tag to add to the notification.
default: Ansible
title:
description:
- The notification title, which appears in the pop up..
default: Notification HH:mm
author:
- Jon Hawkesworth (@jhawkesworth)
notes:
- This module must run on a windows 10 or Server 2016 host, so ensure your play targets windows hosts, or delegates to a windows host.
- The module does not fail if there are no logged in users to notify.
- Messages are only sent to the local host where the module is run.
- You must run this module with async, otherwise it will hang until the expire period has passed.
'''
EXAMPLES = r'''
- name: Warn logged in users of impending upgrade (note use of async to stop the module from waiting until notification expires).
win_toast:
expire: 60
title: System Upgrade Notification
msg: Automated upgrade about to start. Please save your work and log off before {{ deployment_start_time }}
async: 60
poll: 0
'''
# Documented return values of the module (rendered by ansible-doc).
# Fixes the repeated "allways" -> "always" typo in the 'returned' fields.
RETURN = r'''
expire_at_utc:
    description: Calculated utc date time when the notification expires.
    returned: always
    type: string
    sample: 07 July 2017 04:50:54
no_toast_sent_reason:
    description: Text containing the reason why a notification was not sent.
    returned: when no logged in users are detected
    type: string
    sample: No logged in users to notify
sent_localtime:
    description: local date time when the notification was sent.
    returned: always
    type: string
    sample: 07 July 2017 05:45:54
time_taken:
    description: How long the module took to run on the remote windows host in seconds.
    returned: always
    type: float
    sample: 0.3706631999999997
toast_sent:
    description: Whether the module was able to send a toast notification or not.
    returned: always
    type: boolean
    sample: false
'''
|
saukrIppl/seahub | refs/heads/master | thirdpart/Django-1.8.10-py2.7.egg/django/contrib/gis/gdal/__init__.py | 130 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import (check_err, GDALException,
OGRException, OGRIndexError, SRSException) # NOQA
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
# GDAL-independent public API; GDAL-dependent names are appended below
# only when the library loads successfully.
__all__ = [
    'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
    'SRSException', 'OGRGeomType', 'HAS_GDAL',
]

# Attempting to import objects that depend on the GDAL library.  The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
    from django.contrib.gis.gdal.driver import Driver  # NOQA
    from django.contrib.gis.gdal.datasource import DataSource  # NOQA
    from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION  # NOQA
    from django.contrib.gis.gdal.raster.source import GDALRaster  # NOQA
    from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform  # NOQA
    from django.contrib.gis.gdal.geometries import OGRGeometry  # NOQA
    HAS_GDAL = True
    __all__ += [
        'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
        'GDAL_VERSION', 'SpatialReference', 'CoordTransform', 'OGRGeometry',
    ]
except GDALException:
    HAS_GDAL = False

# Envelope only needs ctypes, not the GDAL shared library itself.
try:
    from django.contrib.gis.gdal.envelope import Envelope
    __all__ += ['Envelope']
except ImportError:
    # No ctypes, but don't raise an exception.
    pass
|
RunningLight/machinekit | refs/heads/master | lib/python/gladevcp/hal_pyngcgui.py | 26 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright: 2013
# Author: Dewey Garrett <dgarrett@panix.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#------------------------------------------------------------------------------
import os
import gtk
import gobject
import pango
import hal_actions
import pyngcgui
g_module = os.path.basename(__file__)
#-----------------------------------------------------------------------------
# class to make a gladevcp widget:
class PyNgcGui(gtk.Frame, hal_actions._EMC_ActionBase):
    """PyNgcGui -- gladevcp widget.

    Embeds a pyngcgui.NgcGui notebook inside a gtk.Frame so ngcgui can be
    placed on a gladevcp panel.  Configuration is exposed through gobject
    properties (editable in the glade designer); the NgcGui instance is
    created one gtk timeout cycle after construction so that all property
    values have been applied first.
    """
    __gtype_name__ = 'PyNgcGui'
    __gproperties__ = {
        'use_keyboard' :       (gobject.TYPE_BOOLEAN
                               ,'Use Popup Keyboard'
                               ,'Yes or No'
                               ,False
                               ,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
                               ),
        'debug' :              (gobject.TYPE_BOOLEAN
                               ,'Debug'
                               ,'Yes or No'
                               ,False
                               ,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
                               ),
        'verbose' :            (gobject.TYPE_BOOLEAN
                               ,'Verbose'
                               ,'Yes or No'
                               ,False
                               ,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
                               ),
        'send_function_name': (gobject.TYPE_STRING
                               ,'Send Function'
                               ,'default_send | send_to_axis | dummy_send'
                               ,'default_send'
                               ,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
                               ),
        'send_to_dir':        (gobject.TYPE_STRING
                               ,'Send to dir'
                               ,'None|touchy|dirname None(default:[DISPLAY]PROGRAM_PREFIX'
                               ,''
                               ,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
                               ),
        'gtk_theme_name':     (gobject.TYPE_STRING
                               ,'GTK+ Theme Name'
                               ,'default | name_of_gtk+_theme'
                               ,'Follow System Theme'
                               ,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
                               ),
        'control_font_name':  (gobject.TYPE_STRING
                               ,'Control Font'
                               ,'example: Sans 10'
                               ,'Sans 10'
                               ,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
                               ),
        }
    __gproperties = __gproperties__ # self.__gproperties

    def __init__(self):
        super(PyNgcGui, self).__init__(label=None) # glade creates label anyway
        self.set_label(None) # this doesn't work here
        # the two attempts above don't prevent glade from making a Frame
        # label; it is removed later by remove_unwanted_label()

        # Seed property_dict[] with the default value of every gproperty.
        self.property_dict = {}
        for name in self.__gproperties.keys():
            gtype = self.__gproperties[name][0]
            if (   gtype == gobject.TYPE_BOOLEAN
                or gtype == gobject.TYPE_STRING):
                ty, lbl, tip, dflt, other = self.__gproperties[name]
            if (   gtype == gobject.TYPE_INT
                or gtype == gobject.TYPE_FLOAT):
                # unused by current properties; kept for future numeric ones
                ty, lbl, tip, minv, maxv, dflt, other = self.__gproperties[name]
            self.property_dict[name] = dflt
        gobject.timeout_add(1, self.go_ngcgui) # deferred startup

    def do_get_property(self, property):
        """gobject getter: return the cached value of a known property."""
        name = property.name.replace('-', '_')
        if name in self.property_dict.keys():
            return self.property_dict[name]
        else:
            raise AttributeError(_('%s:unknown property %s')
                                 % (g_module, property.name))

    def do_set_property(self, property, value):
        """gobject setter: cache the value for later use by go_ngcgui()."""
        name = property.name.replace('-', '_')
        if name not in self.__gproperties.keys():
            # BUGFIX: was 'raise(AttributeError, msg)', which raises a tuple
            # on python2 and is a TypeError on python3.
            raise AttributeError(
                _('%s:pyngcgui:do_set_property: unknown <%s>')
                % (g_module, name))
        else:
            pyngcgui.vprint('SET P[%s]=%s' % (name, value))
            self.property_dict[name] = value

    def go_ngcgui(self):
        """Deferred constructor: build NgcGui from the collected properties."""
        self.start_NgcGui(debug = self.property_dict['debug']
                         ,verbose = self.property_dict['verbose']
                         ,use_keyboard = self.property_dict['use_keyboard']
                         ,send_function_name = self.property_dict['send_function_name']
                         ,send_to_dir = self.property_dict['send_to_dir']
                         ,control_font_name = self.property_dict['control_font_name']
                         ,gtk_theme_name = self.property_dict['gtk_theme_name']
                         )
        gobject.timeout_add(1, self.remove_unwanted_label)
        return False # one-time-only timeout

    def remove_unwanted_label(self):
        # coerce removal of the Frame label that glade insists on creating
        self.set_label(None)
        return False # one-time-only

    def start_NgcGui(self
                    ,debug=False
                    ,verbose=False
                    ,use_keyboard=False
                    ,send_function_name=''
                    ,send_to_dir=''
                    ,control_font_name=None
                    ,gtk_theme_name="Follow System Theme"
                    ):
        """Create the NgcGui notebook inside this frame.

        debug/verbose/use_keyboard: booleans forwarded to NgcGui.
        send_function_name: '', 'default_send', 'send_to_axis' or 'dummy_send'.
        send_to_dir: '' or None for default, 'touchy' for touchy's auto dir,
                     or an existing directory name (raises ValueError if not
                     a directory).
        control_font_name: pango font description string, or None.
        gtk_theme_name: gtk theme to apply.
        """
        thenotebook = gtk.Notebook()
        self.add(thenotebook) # tried with self=VBox,HBox,Frame
                              # Frame shows up best in glade designer
        keyboardfile = None
        if use_keyboard: keyboardfile = 'default'

        send_function = None # None: let NgcGui handle it
        if   send_function_name == '':             send_function = pyngcgui.default_send
        elif send_function_name == 'dummy_send':   send_function = pyngcgui.dummy_send
        elif send_function_name == 'send_to_axis': send_function = pyngcgui.send_to_axis
        elif send_function_name == 'default_send': send_function = pyngcgui.default_send
        else:
            print(_('%s:unknown send_function<%s>')
                  % (g_module, send_function_name))

        # BUGFIX: control_font must always be bound; it was previously left
        # undefined (NameError) when control_font_name is None.
        control_font = None
        if control_font_name is not None:
            control_font = pango.FontDescription(control_font_name)

        auto_file = None # use default behavior
        # BUGFIX: guard against send_to_dir=None before calling .strip()
        if send_to_dir is not None and send_to_dir.strip() == "":
            send_to_dir = None
        if send_to_dir is not None:
            if send_to_dir == 'touchy':
                # allow sent file to show up in touchy auto tab page
                send_to_dir = '~/linuxcnc/nc_files'
            if not os.path.isdir(os.path.expanduser(send_to_dir)):
                raise ValueError(_('%s:Not a directory:\n  %s\n'
                                 % (g_module, send_to_dir)))
            auto_file = os.path.expanduser(
                os.path.join(send_to_dir, 'ngcgui_generated.ngc'))

        self.ngcgui = pyngcgui.NgcGui(w=thenotebook
                                     ,debug=debug
                                     ,verbose=verbose
                                     ,keyboardfile=keyboardfile
                                     ,send_function=send_function # prototype: (fname)
                                     ,auto_file=auto_file # None for default behavior
                                     ,control_font=control_font
                                     ,gtk_theme_name=gtk_theme_name
                                     )
|
palashahuja/pgmpy | refs/heads/dev | pgmpy/base/UndirectedGraph.py | 2 | #!/usr/bin/env python3
import itertools
import networkx as nx
class UndirectedGraph(nx.Graph):
    """Base class for all the undirected graphical models.

    Nodes stand for random variables, factors or cliques of random
    variables; edges stand for interactions between them.

    Parameters
    ----------
    ebunch: input graph
        Data to initialize graph.  If ebunch=None (default) an empty
        graph is created.  The data can be an edge list or any Networkx
        graph object.

    Examples
    --------
    Create an empty UndirectedGraph with no nodes and no edges

    >>> from pgmpy.base import UndirectedGraph
    >>> G = UndirectedGraph()

    Grow it one node, several nodes, one edge or several edges at a time:

    >>> G.add_node('a')
    >>> G.add_nodes_from(['a', 'b'])
    >>> G.add_edge('a', 'b')
    >>> G.add_edges_from([('a', 'b'), ('b', 'c')])

    Common graph features allow python syntax:

    >>> 'a' in G    # check if node in graph
    True
    >>> len(G)      # number of nodes in graph
    3
    """

    def __init__(self, ebunch=None):
        super().__init__(ebunch)

    def add_node(self, node, **kwargs):
        """Add a single node (any hashable Python object) to the graph.

        Examples
        --------
        >>> from pgmpy.base import UndirectedGraph
        >>> G = UndirectedGraph()
        >>> G.add_node('A')
        """
        super().add_node(node, **kwargs)

    def add_nodes_from(self, nodes, **kwargs):
        """Add every node of an iterable container to the graph.

        Examples
        --------
        >>> from pgmpy.base import UndirectedGraph
        >>> G = UndirectedGraph()
        >>> G.add_nodes_from(['A', 'B', 'C'])
        """
        # delegate per-node so subclass overrides of add_node take effect
        for item in nodes:
            self.add_node(item, **kwargs)

    def add_edge(self, u, v, **kwargs):
        """Add an edge between u and v, creating missing nodes automatically.

        Examples
        --------
        >>> from pgmpy.base import UndirectedGraph
        >>> G = UndirectedGraph()
        >>> G.add_edge('Alice', 'Bob')
        """
        super().add_edge(u, v, **kwargs)

    def add_edges_from(self, ebunch, **kwargs):
        """Add all edges in ebunch, each given as a 2-tuple (u, v).

        Nodes referred to in ebunch that are not already present are
        added automatically.

        Examples
        --------
        >>> from pgmpy.base import UndirectedGraph
        >>> G = UndirectedGraph()
        >>> G.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles')])
        """
        # delegate per-edge so subclass overrides of add_edge take effect
        for edge in ebunch:
            self.add_edge(*edge, **kwargs)

    def check_clique(self, nodes):
        """Return True if every pair of the given nodes is connected.

        Parameters
        ----------
        nodes: list, array-like
            List of nodes to check if they are a part of any clique.
        """
        return all(self.has_edge(u, v)
                   for u, v in itertools.combinations(nodes, 2))

    def is_triangulated(self):
        """Return True if the undirected graph is triangulated (chordal).

        Examples
        --------
        >>> from pgmpy.base import UndirectedGraph
        >>> G = UndirectedGraph()
        >>> G.add_edges_from([('x1', 'x2'), ('x1', 'x3'), ('x1', 'x4'),
        ...                   ('x2', 'x4'), ('x3', 'x4')])
        >>> G.is_triangulated()
        True
        """
        return nx.is_chordal(self)
|
rentalita/django-layoutdemo | refs/heads/master | src/python/layoutdemo/default/tasks.py | 22 | # -*- coding: utf-8 -*-
# Local Variables:
# indent-tabs-mode: nil
# End:
# vim: ai et sw=4 ts=4
|
the-adrian/KernotekV2.0 | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py | 3132 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Threshold used by the distribution analyser (see ratio derivation above).
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char-to-frequency-order table: EUCTWCharToFreqOrder (below) maps a
# character index to its frequency rank and holds EUCTW_TABLE_SIZE entries.
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purposes
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
KellyChan/python-examples | refs/heads/master | javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/conf/locale/mk/formats.py | 251 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
'%Y-%m-%d', # '2006-10-25'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
sinbazhou/odoo | refs/heads/8.0 | addons/payment_ogone/tests/test_ogone.py | 430 | # -*- coding: utf-8 -*-
from lxml import objectify
import time
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.tools import mute_logger
class OgonePayment(PaymentAcquirerCommon):
def setUp(self):
super(OgonePayment, self).setUp()
cr, uid = self.cr, self.uid
self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# get the adyen account
model, self.ogone_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_ogone', 'payment_acquirer_ogone')
def test_10_ogone_form_render(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do stupid thing
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering + shasign
# ----------------------------------------
form_values = {
'PSPID': 'dummy',
'ORDERID': 'test_ref0',
'AMOUNT': '1',
'CURRENCY': 'EUR',
'LANGUAGE': 'en_US',
'CN': 'Norbert Buyer',
'EMAIL': 'norbert.buyer@example.com',
'OWNERZIP': '1000',
'OWNERADDRESS': 'Huge Street 2/543',
'OWNERCTY': 'Belgium',
'OWNERTOWN': 'Sin City',
'OWNERTELNO': '0032 12 34 56 78',
'SHASIGN': '815f67b8ff70d234ffcf437c13a9fa7f807044cc',
'ACCEPTURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._accept_url),
'DECLINEURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._decline_url),
'EXCEPTIONURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._exception_url),
'CANCELURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._cancel_url),
}
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'test_ref0', 0.01, self.currency_euro_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
# ----------------------------------------
# Test2: button using tx + validation
# ----------------------------------------
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref0',
'partner_id': self.buyer_id,
}, context=context
)
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'should_be_erased', 0.01, self.currency_euro,
tx_id=tx_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
@mute_logger('openerp.addons.payment_ogone.models.ogone', 'ValidationError')
def test_20_ogone_form_management(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do stupid thing
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# typical data posted by ogone after client has successfully paid
ogone_post_data = {
'orderID': u'test_ref_2',
'STATUS': u'9',
'CARDNO': u'XXXXXXXXXXXX0002',
'PAYID': u'25381582',
'CN': u'Norbert Buyer',
'NCERROR': u'0',
'TRXDATE': u'11/15/13',
'IP': u'85.201.233.72',
'BRAND': u'VISA',
'ACCEPTANCE': u'test123',
'currency': u'EUR',
'amount': u'1.95',
'SHASIGN': u'7B7B0ED9CBC4A85543A9073374589033A62A05A5',
'ED': u'0315',
'PM': u'CreditCard'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# create tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 1.95,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref_2',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france_id,
}, context=context
)
# validate it
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'done', 'ogone: validation did not put tx into done state')
self.assertEqual(tx.ogone_payid, ogone_post_data.get('PAYID'), 'ogone: validation did not update tx payid')
# reset tx
tx.write({'state': 'draft', 'date_validate': False, 'ogone_payid': False})
# now ogone post is ok: try to modify the SHASIGN
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# simulate an error
ogone_post_data['STATUS'] = 2
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'error', 'ogone: erroneous validation did not put tx into error state')
def test_30_ogone_s2s(self):
test_ref = 'test_ref_%.15f' % time.time()
cr, uid, context = self.cr, self.uid, {}
# be sure not to do stupid thing
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': test_ref,
'partner_id': self.buyer_id,
'type': 'server2server',
}, context=context
)
# create an alias
res = self.payment_transaction.ogone_s2s_create_alias(
cr, uid, tx_id, {
'expiry_date_mm': '01',
'expiry_date_yy': '2015',
'holder_name': 'Norbert Poilu',
'number': '4000000000000002',
'brand': 'VISA',
}, context=context)
# check an alias is set, containing at least OPENERP
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertIn('OPENERP', tx.partner_reference, 'ogone: wrong partner reference after creating an alias')
res = self.payment_transaction.ogone_s2s_execute(cr, uid, tx_id, {}, context=context)
# print res
# {
# 'orderID': u'reference',
# 'STATUS': u'9',
# 'CARDNO': u'XXXXXXXXXXXX0002',
# 'PAYID': u'24998692',
# 'CN': u'Norbert Poilu',
# 'NCERROR': u'0',
# 'TRXDATE': u'11/05/13',
# 'IP': u'85.201.233.72',
# 'BRAND': u'VISA',
# 'ACCEPTANCE': u'test123',
# 'currency': u'EUR',
# 'amount': u'1.95',
# 'SHASIGN': u'EFDC56879EF7DE72CCF4B397076B5C9A844CB0FA',
# 'ED': u'0314',
# 'PM': u'CreditCard'
# }
|
azureplus/hue | refs/heads/master | desktop/libs/libzookeeper/setup.py | 30 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "libzookeeper",
version = VERSION,
url = 'http://github.com/cloudera/hue',
description = "ZooKeeper Libraries",
packages = find_packages('src'),
package_dir = {'': 'src' },
install_requires = ['setuptools', 'desktop'],
# Even libraries need to be registered as desktop_apps,
# if they have configuration, like this one.
entry_points = { 'desktop.sdk.lib': 'libzookeeper=libzookeeper' },
)
|
wesleybowman/UTide | refs/heads/master | tests/test_harmonics.py | 1 | """
Test for FUV in harmonics.py.
The test data are generated using octave with a slight
modification of ut_FUV extracted from ut_solv.m. The
data-generating script is make_FUV_data.py.
"""
import os
from numpy.testing import assert_array_almost_equal
from utide._ut_constants import _base_dir
from utide.harmonics import FUV
from utide.utilities import loadbunch
fname = os.path.join(_base_dir, "FUV0.npz")
def test_FUV():
x = loadbunch(fname, masked=False)
# Switch epoch from Matlab to Python
x.t -= 366
x.t0 -= 366
for i, flag in enumerate(x.flags):
F, U, V = FUV(x.t, x.t0, x.lind - 1, x.lat, flag)
print(f"i: {i} ngflags: {flag}")
# We use broadcasting instead of replication, so
# we need to test only against the first row of
# the octave output in such cases.
if F.shape[0] == 1:
sub = (i, slice(0, 1))
else:
sub = (i,)
assert_array_almost_equal(F, x.Fo[sub])
assert_array_almost_equal(U, x.Uo[sub])
if V.shape[0] == 1:
sub = (i, slice(0, 1))
else:
sub = (i,)
assert_array_almost_equal(V, x.Vo[sub])
|
aminhp93/learning_python | refs/heads/master | src/tags/apps.py | 13 | from django.apps import AppConfig
class TagsConfig(AppConfig):
    """Django application configuration registering the ``tags`` app."""
    name = 'tags'
|
mdaniel/intellij-community | refs/heads/master | python/testData/inspections/NumpyDocStringRemoveCombinedVarargParam_after.py | 52 | def f():
"""
Parameters
==========
x, **kwargs
no one writes like that
""" |
pra85/calibre | refs/heads/master | src/html5lib/filters/optionaltags.py | 129 | import _base
class Filter(_base.Filter):
    """Token-stream filter that removes omittable HTML start and end tags.

    HTML permits certain tags (html, head, body, li, p, td, ...) to be
    omitted from serialized output when the neighbouring tokens make them
    unambiguous; this filter drops such tags to produce compact output.
    """

    def slider(self):
        """Yield (previous, current, next) token triples over the stream."""
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        yield previous2, previous1, None

    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag carrying attributes can never be omitted.
                if (token["data"] or
                    not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if this start tag may be omitted given its neighbours."""
        type = next and next["type"] or None
        # BUGFIX: was ``tagname in 'html'``, which performs a substring test
        # and would also match e.g. 'h' or 'ht'; equality was intended.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                  previous['name'] in ('tbody','thead','tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if this end tag may be omitted given the next token."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
|
lthurlow/Network-Grapher | refs/heads/master | proj/external/matplotlib-1.2.1/lib/mpl_examples/api/sankey_demo_rankine.py | 6 | """Demonstrate the Sankey class with a practical example of a Rankine power cycle.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.sankey import Sankey
# Figure/axes setup; ticks are suppressed because the Sankey diagram carries
# its own labels.
fig = plt.figure(figsize=(8, 12))
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[],
                     title="Rankine Power Cycle: Example 8.6 from Moran and Shapiro\n"
                           + "\x22Fundamentals of Engineering Thermodynamics\x22, 6th ed., 2008")
# Enthalpy flow rate of each numbered stream of the cycle, in MW.
Hdot = [260.431, 35.078, 180.794, 221.115, 22.700,
        142.361, 10.193, 10.210, 43.670, 44.312,
        68.631, 10.758, 10.758, 0.017, 0.642,
        232.121, 44.559, 100.613, 132.168]  # MW
# Scale all flows relative to the largest one so the diagram fits the axes.
sankey = Sankey(ax=ax, format='%.3G', unit=' MW', gap=0.5, scale=1.0/Hdot[0])
# Assemble the cycle one component at a time; ``prior``/``connect`` stitch
# each new subdiagram onto a flow of an already-drawn one.
sankey.add(patchlabel='\n\nPump 1', rotation=90, facecolor='#37c959',
           flows=[Hdot[13], Hdot[6], -Hdot[7]],
           labels=['Shaft power', '', None],
           pathlengths=[0.4, 0.883, 0.25],
           orientations=[1, -1, 0])
sankey.add(patchlabel='\n\nOpen\nheater', facecolor='#37c959',
           flows=[Hdot[11], Hdot[7], Hdot[4], -Hdot[8]],
           labels=[None, '', None, None],
           pathlengths=[0.25, 0.25, 1.93, 0.25],
           orientations=[1, 0, -1, 0], prior=0, connect=(2, 1))
sankey.add(patchlabel='\n\nPump 2', facecolor='#37c959',
           flows=[Hdot[14], Hdot[8], -Hdot[9]],
           labels=['Shaft power', '', None],
           pathlengths=[0.4, 0.25, 0.25],
           orientations=[1, 0, 0], prior=1, connect=(3, 1))
sankey.add(patchlabel='Closed\nheater', trunklength=2.914, fc='#37c959',
           flows=[Hdot[9], Hdot[1], -Hdot[11], -Hdot[10]],
           pathlengths=[0.25, 1.543, 0.25, 0.25],
           labels=['', '', None, None],
           orientations=[0, -1, 1, -1], prior=2, connect=(2, 0))
sankey.add(patchlabel='Trap', facecolor='#37c959', trunklength=5.102,
           flows=[Hdot[11], -Hdot[12]],
           labels=['\n', None],
           pathlengths=[1.0, 1.01],
           orientations=[1, 1], prior=3, connect=(2, 0))
sankey.add(patchlabel='Steam\ngenerator', facecolor='#ff5555',
           flows=[Hdot[15], Hdot[10], Hdot[2], -Hdot[3], -Hdot[0]],
           labels=['Heat rate', '', '', None, None],
           pathlengths=0.25,
           orientations=[1, 0, -1, -1, -1], prior=3, connect=(3, 1))
sankey.add(patchlabel='\n\n\nTurbine 1', facecolor='#37c959',
           flows=[Hdot[0], -Hdot[16], -Hdot[1], -Hdot[2]],
           labels=['', None, None, None],
           pathlengths=[0.25, 0.153, 1.543, 0.25],
           orientations=[0, 1, -1, -1], prior=5, connect=(4, 0))
sankey.add(patchlabel='\n\n\nReheat', facecolor='#37c959',
           flows=[Hdot[2], -Hdot[2]],
           labels=[None, None],
           pathlengths=[0.725, 0.25],
           orientations=[-1, 0], prior=6, connect=(3, 0))
sankey.add(patchlabel='Turbine 2', trunklength=3.212, facecolor='#37c959',
           flows=[Hdot[3], Hdot[16], -Hdot[5], -Hdot[4], -Hdot[17]],
           labels=[None, 'Shaft power', None, '', 'Shaft power'],
           pathlengths=[0.751, 0.15, 0.25, 1.93, 0.25],
           orientations=[0, -1, 0, -1, 1], prior=6, connect=(1, 1))
sankey.add(patchlabel='Condenser', facecolor='#58b1fa', trunklength=1.764,
           flows=[Hdot[5], -Hdot[18], -Hdot[6]],
           labels=['', 'Heat rate', None],
           pathlengths=[0.45, 0.25, 0.883],
           orientations=[-1, 1, 0], prior=8, connect=(2, 0))
# Render all subdiagrams, then emphasize the component labels.
diagrams = sankey.finish()
for diagram in diagrams:
    diagram.text.set_fontweight('bold')
    diagram.text.set_fontsize('10')
    for text in diagram.texts:
        text.set_fontsize('10')
# Notice that the explicit connections are handled automatically, but the
# implicit ones currently are not.  The lengths of the paths and the trunks
# must be adjusted manually, and that is a bit tricky.
plt.show()
|
bcarroll/authmgr | refs/heads/master | python-3.6.2-Win64/Lib/site-packages/alembic/testing/fixtures.py | 39 | # coding: utf-8
import io
import re
from sqlalchemy import create_engine, text, MetaData
import alembic
from ..util.compat import configparser
from .. import util
from ..util.compat import string_types, text_type
from ..migration import MigrationContext
from ..environment import EnvironmentContext
from ..operations import Operations
from contextlib import contextmanager
from .plugin.plugin_base import SkipTest
from .assertions import _get_dialect, eq_
from . import mock
# Optional per-checkout test configuration; ConfigParser.read() silently
# ignores missing files, so this is a no-op when test.cfg is absent.
testing_config = configparser.ConfigParser()
testing_config.read(['test.cfg'])
if not util.sqla_094:
    class TestBase(object):
        """Minimal fallback for ``sqlalchemy.testing.fixtures.TestBase``,
        used when the installed SQLAlchemy predates 0.9.4."""
        # A sequence of database names to always run, regardless of the
        # constraints below.
        __whitelist__ = ()
        # A sequence of requirement names matching testing.requires decorators
        __requires__ = ()
        # A sequence of dialect names to exclude from the test class.
        __unsupported_on__ = ()
        # If present, test class is only runnable for the *single* specified
        # dialect. If you need multiple, use __unsupported_on__ and invert.
        __only_on__ = None
        # A sequence of no-arg callables. If any are True, the entire testcase is
        # skipped.
        __skip_if__ = None
        def assert_(self, val, msg=None):
            assert val, msg
        # apparently a handful of tests are doing this....OK
        # (delegate nose-style setup/teardown to unittest-style hooks)
        def setup(self):
            if hasattr(self, "setUp"):
                self.setUp()
        def teardown(self):
            if hasattr(self, "tearDown"):
                self.tearDown()
else:
    # Modern SQLAlchemy ships the real TestBase.
    from sqlalchemy.testing.fixtures import TestBase
def capture_db():
    """Create a mock-strategy Postgres engine that records compiled SQL.

    Returns a ``(engine, statements)`` pair: each statement executed on the
    engine is compiled against its dialect and appended to ``statements``
    as a string instead of being sent to a real database.
    """
    statements = []

    def dump(sql, *multiparams, **params):
        statements.append(str(sql.compile(dialect=engine.dialect)))

    engine = create_engine("postgresql://", strategy="mock", executor=dump)
    return engine, statements


# NOTE(review): appears unused within this module -- verify before removing.
_engs = {}
@contextmanager
def capture_context_buffer(**kw):
    """Patch ``EnvironmentContext.configure`` so generated output is written
    to an in-memory buffer, which is yielded to the caller.

    Pass ``bytes_io=True`` to capture bytes instead of text; remaining
    keyword arguments are merged into every ``configure()`` call.
    """
    buf = io.BytesIO() if kw.pop('bytes_io', False) else io.StringIO()
    kw['dialect_name'] = "sqlite"
    kw['output_buffer'] = buf
    conf = EnvironmentContext.configure

    def configure(*arg, **opt):
        opt.update(**kw)
        return conf(*arg, **opt)

    with mock.patch.object(EnvironmentContext, "configure", configure):
        yield buf
def op_fixture(
        dialect='default', as_sql=False,
        naming_convention=None, literal_binds=False):
    """Create a MigrationContext wired to an in-memory buffer for tests.

    Statements executed (or, in ``as_sql`` mode, emitted) through the
    context are normalized and collected so tests can assert on them via
    ``context.assert_(...)`` and ``context.assert_contains(...)``.

    :param dialect: dialect name used to compile statements.
    :param as_sql: if True, run in "offline" SQL-emitting mode.
    :param naming_convention: optional naming-convention dict applied to a
     ``target_metadata`` (requires SQLAlchemy 0.9.2 or greater).
    :param literal_binds: render bound parameters inline (offline mode).
    """
    opts = {}
    if naming_convention:
        if not util.sqla_092:
            raise SkipTest(
                "naming_convention feature requires "
                "sqla 0.9.2 or greater")
        opts['target_metadata'] = MetaData(naming_convention=naming_convention)

    class buffer_(object):
        """File-like object collecting normalized SQL statements."""

        def __init__(self):
            self.lines = []

        def write(self, msg):
            msg = msg.strip()
            msg = re.sub(r'[\n\t]', '', msg)
            if as_sql:
                # the impl produces soft tabs,
                # so search for blocks of 4 spaces
                # (BUGFIX: pattern was a single space, contradicting the
                # comment above and collapsing all spacing)
                msg = re.sub(r'    ', '', msg)
                # raw string: '\;' was a deprecated string escape
                msg = re.sub(r'\;\n*$', '', msg)
            self.lines.append(msg)

        def flush(self):
            pass

    buf = buffer_()

    class ctx(MigrationContext):
        """MigrationContext with assertion helpers over the captured SQL."""

        def clear_assertions(self):
            buf.lines[:] = []

        def assert_(self, *sql):
            # TODO: make this more flexible about
            # whitespace and such
            eq_(buf.lines, list(sql))

        def assert_contains(self, sql):
            for stmt in buf.lines:
                if sql in stmt:
                    return
            else:
                assert False, "Could not locate fragment %r in %r" % (
                    sql,
                    buf.lines
                )

    if as_sql:
        opts['as_sql'] = as_sql
    if literal_binds:
        opts['literal_binds'] = literal_binds
    ctx_dialect = _get_dialect(dialect)
    if not as_sql:
        # "Online" mode: statements run against a mock connection whose
        # execute() compiles them to strings and writes into the buffer.
        def execute(stmt, *multiparam, **param):
            if isinstance(stmt, string_types):
                stmt = text(stmt)
            assert stmt.supports_execution
            sql = text_type(stmt.compile(dialect=ctx_dialect))
            buf.write(sql)
        connection = mock.Mock(dialect=ctx_dialect, execute=execute)
    else:
        # "Offline" mode: the context writes SQL directly to the buffer.
        opts['output_buffer'] = buf
        connection = None
    context = ctx(
        ctx_dialect,
        connection,
        opts)
    alembic.op._proxy = Operations(context)
    return context
|
fernandog/Medusa | refs/heads/optimized | ext/js2py/translators/translator.py | 1 | import pyjsparser
import pyjsparser.parser
from . import translating_nodes
import hashlib
import re
# Enable Js2Py exceptions and pyimport in parser
pyjsparser.parser.ENABLE_PYIMPORT = True
# the re below is how we'll recognise numeric constants.
# it finds any 'simple numeric that is not preceded with an alphanumeric character
# the numeric can be a float (so a dot is found) but
# it does not recognise notation such as 123e5, 0xFF, infinity or NaN
CP_NUMERIC_RE = re.compile(r'(?<![a-zA-Z0-9_"\'])([0-9\.]+)')
CP_NUMERIC_PLACEHOLDER = '__PyJsNUM_%i_PyJsNUM__'
CP_NUMERIC_PLACEHOLDER_REVERSE_RE = re.compile(
    CP_NUMERIC_PLACEHOLDER.replace('%i', '([0-9\.]+)')
)
# the re below is how we'll recognise string constants
# it finds a ' or ", then reads until the next matching ' or "
# this re only services simple cases, it can not be used when
# there are escaped quotes in the expression
#CP_STRING_1 = re.compile(r'(["\'])(.*?)\1') # this is how we'll recognise string constants
CP_STRING = '"([^\\\\"]+|\\\\([bfnrtv\'"\\\\]|[0-3]?[0-7]{1,2}|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}))*"|\'([^\\\\\']+|\\\\([bfnrtv\'"\\\\]|[0-3]?[0-7]{1,2}|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}))*\''
CP_STRING_RE = re.compile(CP_STRING) # this is how we'll recognise string constants
CP_STRING_PLACEHOLDER = '__PyJsSTR_%i_PyJsSTR__'
CP_STRING_PLACEHOLDER_REVERSE_RE = re.compile(
    CP_STRING_PLACEHOLDER.replace('%i', '([0-9\.]+)')
)
# Compilation-plan cache: maps md5(compilation plan) to a dict holding the
# plan text and its translated ("proto") Python code.
cache = {}
# This crap is still needed but I removed it for speed reasons. Have to think of a better idea
# import js2py.pyjs, sys
# # Redefine builtin objects... Do you have a better idea?
# for m in list(sys.modules):
# if m.startswith('js2py'):
# del sys.modules[m]
# del js2py.pyjs
# del js2py
# Python preamble prepended to every translation: imports the runtime and
# sets up the global JS scope.
DEFAULT_HEADER = u'''from js2py.pyjs import *
# setting scope
var = Scope( JS_BUILTINS )
set_global_object(var)
# Code follows:
'''
def dbg(x):
    """Legacy no-op debug hook; ignores its argument and returns ''."""
    return ''
def translate_js(js, HEADER=DEFAULT_HEADER, use_compilation_plan=False):
    """Translate JavaScript source code into equivalent Python source.

    When *use_compilation_plan* is set and the source contains no comments,
    the cached compilation-plan fast path is used instead.
    """
    comment_free = '//' not in js and '/*' not in js
    if use_compilation_plan and comment_free:
        return translate_js_with_compilation_plan(js, HEADER=HEADER)
    # Parse JS into an esprima-style syntax tree, then emit Python from it.
    # Another way of doing that would be with my auto esprima translation but
    # its much slower and causes import problems:
    # parsed = esprima.parse(js).to_dict()
    syntax_tree = pyjsparser.PyJsParser().parse(js)
    translating_nodes.clean_stacks()
    return HEADER + translating_nodes.trans(syntax_tree)
class match_unumerator(object):
    """Callable usable as a ``re.sub`` replacement function.

    Each match is stored and substituted with a numbered placeholder built
    from *placeholder_mask*; ``wrap_up`` later splices the stored originals
    back into generated code in place of the (repr'd) placeholders.
    """

    # Class-level default; ``__call__`` shadows it with an instance attribute.
    matchcount = -1

    def __init__(self, placeholder_mask):
        self.placeholder_mask = placeholder_mask
        self.matches = []

    def __call__(self, match):
        self.matchcount += 1
        self.matches.append(match.group(0))
        return self.placeholder_mask % self.matchcount

    def __repr__(self):
        lines = []
        for idx, matched in enumerate(self.matches):
            lines.append(self.placeholder_mask % idx + '=' + matched)
        return '\n'.join(lines)

    def wrap_up(self, output):
        """Replace each quoted placeholder in *output* with its original text."""
        for idx, original in enumerate(self.matches):
            placeholder = "u'" + self.placeholder_mask % idx + "'"
            output = output.replace(placeholder, original, 1)
        return output
def get_compilation_plan(js):
    """Split *js* into a cacheable template plus its literal constants.

    String and numeric literals are pulled out and replaced by quoted
    placeholders; the returned enumerators can later splice the originals
    back into the translated Python via ``wrap_up``.
    """
    str_literals = match_unumerator(CP_STRING_PLACEHOLDER)
    plan = re.sub(CP_STRING, str_literals, js)
    num_literals = match_unumerator(CP_NUMERIC_PLACEHOLDER)
    plan = re.sub(CP_NUMERIC_RE, num_literals, plan)
    # now put quotes around the placeholders; note that patching the markers
    # with plain string replaces is somewhat faster than using another re:
    for kind in ('NUM', 'STR'):
        plan = plan.replace('__PyJs%s_' % kind, '"__PyJs%s_' % kind)
        plan = plan.replace('_PyJs%s__' % kind, '_PyJs%s__"' % kind)
    return str_literals, num_literals, plan
def translate_js_with_compilation_plan(js, HEADER=DEFAULT_HEADER):
    """js has to be a javascript source code.
    returns equivalent python code.

    compile plans only work with the following restrictions:
    - only enabled for oneliner expressions
    - when there are comments in the js code string substitution is disabled
    - when there nested escaped quotes string substitution is disabled, so

    cacheable:
    Q1 == 1 && name == 'harry'

    not cacheable:
    Q1 == 1 && name == 'harry' // some comment

    not cacheable:
    Q1 == 1 && name == 'o\'Reilly'

    not cacheable:
    Q1 == 1 && name /* some comment */ == 'o\'Reilly'
    """
    match_increaser_str, match_increaser_num, compilation_plan = get_compilation_plan(js)
    # Key the cache on the literal-free plan so sources differing only in
    # their constants share one translation.
    cp_hash = hashlib.md5(compilation_plan.encode('utf-8')).digest()
    try:
        python_code = cache[cp_hash]['proto_python_code']
    # NOTE(review): bare except silently retranslates on *any* cache error;
    # consider narrowing to KeyError.
    except:
        parser = pyjsparser.PyJsParser()
        parsed = parser.parse(compilation_plan)  # js to esprima syntax tree
        translating_nodes.clean_stacks()
        python_code = translating_nodes.trans(parsed)  # syntax tree to python code
        cache[cp_hash] = {
            'compilation_plan': compilation_plan,
            'proto_python_code': python_code,
        }
    # Splice the original string/numeric literals back into the translation.
    python_code = match_increaser_str.wrap_up(python_code)
    python_code = match_increaser_num.wrap_up(python_code)
    return HEADER + python_code
def trasnlate(js, HEADER=DEFAULT_HEADER):
    """js has to be a javascript source code.
    returns equivalent python code.

    Equivalent to translate_js.

    NOTE(review): the name is misspelled ("trasnlate") but is kept as-is
    because external callers may depend on it; consider adding a correctly
    spelled alias rather than renaming.
    """
    return translate_js(js, HEADER)


# Re-export the syntax-tree-to-Python translator under a friendlier name.
syntax_tree_translate = translating_nodes.trans
if __name__=='__main__':
    # Manual smoke test / benchmark: translate esprima.js, dump the result
    # to res.py, and exec the translated Python.
    PROFILE = False
    import js2py
    import codecs
    def main():
        with codecs.open("esprima.js", "r", "utf-8") as f:
            d = f.read()
        r = js2py.translate_js(d)
        # NOTE(review): the translated source is a str but the file is opened
        # in binary mode ('wb'); under Python 3 this write would raise unless
        # r is bytes -- verify on the targeted Python version.
        with open('res.py','wb') as f2:
            f2.write(r)
        exec(r, {})
    if PROFILE:
        import cProfile
        cProfile.run('main()', sort='tottime')
    else:
        main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.